diff --git a/go.mod b/go.mod
index 2196a53..412f6fc 100644
--- a/go.mod
+++ b/go.mod
@@ -8,10 +8,10 @@ require (
 	github.com/mittwald/go-helm-client v0.12.14
 	github.com/onsi/ginkgo/v2 v2.21.0
 	github.com/onsi/gomega v1.35.1
-	k8s.io/api v0.32.0
-	k8s.io/apiextensions-apiserver v0.32.0
-	k8s.io/apimachinery v0.32.0
-	k8s.io/client-go v0.32.0
+	k8s.io/api v0.32.1
+	k8s.io/apiextensions-apiserver v0.32.1
+	k8s.io/apimachinery v0.32.1
+	k8s.io/client-go v0.32.1
 	k8s.io/klog/v2 v2.130.1
 	sigs.k8s.io/node-feature-discovery v0.15.4
 	sigs.k8s.io/yaml v1.4.0
@@ -142,9 +142,9 @@ require (
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	helm.sh/helm/v3 v3.16.1 // indirect
-	k8s.io/apiserver v0.32.0 // indirect
+	k8s.io/apiserver v0.32.1 // indirect
 	k8s.io/cli-runtime v0.31.1 // indirect
-	k8s.io/component-base v0.32.0 // indirect
+	k8s.io/component-base v0.32.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
 	k8s.io/kubectl v0.31.0 // indirect
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
diff --git a/go.sum b/go.sum
index a213812..43ff994 100644
--- a/go.sum
+++ b/go.sum
@@ -433,20 +433,20 @@ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
 gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
 helm.sh/helm/v3 v3.16.1 h1:cER6tI/8PgUAsaJaQCVBUg3VI9KN4oVaZJgY60RIc0c=
 helm.sh/helm/v3 v3.16.1/go.mod h1:r+xBHHP20qJeEqtvBXMf7W35QDJnzY/eiEBzt+TfHps=
-k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
-k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
-k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0=
-k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw=
-k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
-k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs=
-k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag=
+k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
+k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
+k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw=
+k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto=
+k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
+k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak=
+k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw=
 k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk=
 k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U=
-k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
-k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
-k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU=
-k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM=
+k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
+k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
+k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk=
+k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
index 13be7cb..e802a01 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto
+++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
@@ -675,7 +675,7 @@ message ResourceClaimStatus {
   // which issued it knows that it must put the pod back into the queue,
   // waiting for the ResourceClaim to become usable again.
   //
-  // There can be at most 32 such reservations. This may get increased in
+  // There can be at most 256 such reservations. This may get increased in
   // the future, but not reduced.
   //
   // +optional
diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go
index e3d7fd8..fb4d704 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/types.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/types.go
@@ -687,7 +687,7 @@ type ResourceClaimStatus struct {
 	// which issued it knows that it must put the pod back into the queue,
 	// waiting for the ResourceClaim to become usable again.
 	//
-	// There can be at most 32 such reservations. This may get increased in
+	// There can be at most 256 such reservations. This may get increased in
 	// the future, but not reduced.
 	//
 	// +optional
@@ -715,9 +715,9 @@ type ResourceClaimStatus struct {
 	Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
 }
 
-// ReservedForMaxSize is the maximum number of entries in
-// claim.status.reservedFor.
-const ResourceClaimReservedForMaxSize = 32
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
+// claim.status.reservedFor.
+const ResourceClaimReservedForMaxSize = 256
 
 // ResourceClaimConsumerReference contains enough information to let you
 // locate the consumer of a ResourceClaim. The user must be a resource in the same
diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
index 1a71d64..b41609d 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
@@ -291,7 +291,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string {
 var map_ResourceClaimStatus = map[string]string{
 	"": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
 	"allocation": "Allocation is set once the claim has been allocated successfully.",
-	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
+	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.",
 	"devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
 }
 
diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.proto b/vendor/k8s.io/api/resource/v1beta1/generated.proto
index 6d525d5..4ea13e0 100644
--- a/vendor/k8s.io/api/resource/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/resource/v1beta1/generated.proto
@@ -683,7 +683,7 @@ message ResourceClaimStatus {
   // which issued it knows that it must put the pod back into the queue,
   // waiting for the ResourceClaim to become usable again.
   //
-  // There can be at most 32 such reservations. This may get increased in
+  // There can be at most 256 such reservations. This may get increased in
   // the future, but not reduced.
   //
   // +optional
diff --git a/vendor/k8s.io/api/resource/v1beta1/types.go b/vendor/k8s.io/api/resource/v1beta1/types.go
index a7f1ee7..ca79c5a 100644
--- a/vendor/k8s.io/api/resource/v1beta1/types.go
+++ b/vendor/k8s.io/api/resource/v1beta1/types.go
@@ -695,7 +695,7 @@ type ResourceClaimStatus struct {
 	// which issued it knows that it must put the pod back into the queue,
 	// waiting for the ResourceClaim to become usable again.
 	//
-	// There can be at most 32 such reservations. This may get increased in
+	// There can be at most 256 such reservations. This may get increased in
 	// the future, but not reduced.
 	//
 	// +optional
@@ -723,9 +723,9 @@ type ResourceClaimStatus struct {
 	Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
 }
 
-// ReservedForMaxSize is the maximum number of entries in
-// claim.status.reservedFor.
-const ResourceClaimReservedForMaxSize = 32
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
+// claim.status.reservedFor.
+const ResourceClaimReservedForMaxSize = 256
 
 // ResourceClaimConsumerReference contains enough information to let you
 // locate the consumer of a ResourceClaim. The user must be a resource in the same
diff --git a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
index 1d0176c..4ecc35d 100644
--- a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
@@ -300,7 +300,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string {
 var map_ResourceClaimStatus = map[string]string{
 	"": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
 	"allocation": "Allocation is set once the claim has been allocated successfully.",
-	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
+	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.",
 	"devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
 }
 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 164b023..6d0a50a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -639,7 +639,7 @@ helm.sh/helm/v3/pkg/strvals
 helm.sh/helm/v3/pkg/time
 helm.sh/helm/v3/pkg/time/ctime
 helm.sh/helm/v3/pkg/uploader
-# k8s.io/api v0.32.0
+# k8s.io/api v0.32.1
 ## explicit; go 1.23.0
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -700,7 +700,7 @@ k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
 k8s.io/api/storagemigration/v1alpha1
-# k8s.io/apiextensions-apiserver v0.32.0
+# k8s.io/apiextensions-apiserver v0.32.1
 ## explicit; go 1.23.0
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
@@ -711,7 +711,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
-# k8s.io/apimachinery v0.32.0
+# k8s.io/apimachinery v0.32.1
 ## explicit; go 1.23.0
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -777,7 +777,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.32.0
+# k8s.io/apiserver v0.32.1
 ## explicit; go 1.23.0
 k8s.io/apiserver/pkg/endpoints/deprecation
 # k8s.io/cli-runtime v0.31.1
@@ -786,7 +786,7 @@ k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/genericiooptions
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v0.32.0
+# k8s.io/client-go v0.32.1
 ## explicit; go 1.23.0
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -949,7 +949,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/watchlist
 k8s.io/client-go/util/workqueue
-# k8s.io/component-base v0.32.0
+# k8s.io/component-base v0.32.1
 ## explicit; go 1.23.0
 k8s.io/component-base/version
 # k8s.io/klog/v2 v2.130.1
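Downstream note (not part of the vendored diff): besides the patch-version bump, the user-visible change here is ResourceClaimReservedForMaxSize rising from 32 to 256 in the resource.k8s.io types. The following is a minimal sketch, assuming a consumer of the updated k8s.io/api module, of how code might reference the vendored constant when guarding claim.status.reservedFor; the canAddReservation helper is hypothetical and only illustrates using the constant.

```go
package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
)

// canAddReservation is a hypothetical client-side guard that mirrors the API
// server's cap on claim.status.reservedFor before attempting an update.
func canAddReservation(status *resourcev1beta1.ResourceClaimStatus) bool {
	return len(status.ReservedFor) < resourcev1beta1.ResourceClaimReservedForMaxSize
}

func main() {
	var status resourcev1beta1.ResourceClaimStatus
	// With k8s.io/api v0.32.1 the cap is 256 entries (previously 32).
	fmt.Println(resourcev1beta1.ResourceClaimReservedForMaxSize)
	fmt.Println(canAddReservation(&status))
}
```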