From 7483c92f1113580f3e1c6b9884ef0f1b958fd843 Mon Sep 17 00:00:00 2001 From: Flavio Castelli Date: Wed, 5 Feb 2025 14:33:12 +0100 Subject: [PATCH 1/9] chore(deps): replace backoff with backon (#1653) Replace the `backoff` dependency with `backon`. The former one is no longer maintained and is also pulling the `instant` crate, which has been marked as unmaintained by RUSTSEC. Prior to this commit the public API of kube-rs exposed a trait defined by the `backoff` crate. This commits introduces a new trait defined by kube-rs, which wraps the `backon` trait. Fixes https://github.com/kube-rs/kube/issues/1635 Signed-off-by: Flavio Castelli --- Cargo.toml | 2 +- examples/Cargo.toml | 2 +- kube-runtime/Cargo.toml | 2 +- kube-runtime/src/controller/mod.rs | 7 +- kube-runtime/src/events.rs | 2 +- kube-runtime/src/utils/backoff_reset_timer.rs | 62 +++++++-------- kube-runtime/src/utils/mod.rs | 2 +- kube-runtime/src/utils/stream_backoff.rs | 78 ++++++++++++++++--- kube-runtime/src/utils/watch_ext.rs | 6 +- kube-runtime/src/watcher.rs | 71 +++++++++++++---- 10 files changed, 166 insertions(+), 68 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 906fece31..a6633ac7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ assert-json-diff = "2.0.2" async-broadcast = "0.7.0" async-stream = "0.3.5" async-trait = "0.1.64" -backoff = "0.4.0" +backon = "1.3" base64 = "0.22.1" bytes = "1.1.0" chrono = { version = "0.4.34", default-features = false } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index f260bc73b..b5bbe2c7c 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -52,7 +52,7 @@ tower-http = { workspace = true, features = ["trace", "decompression-gzip"] } hyper = { workspace = true, features = ["client", "http1"] } hyper-util = { workspace = true, features = ["client-legacy", "http1", "tokio"] } thiserror.workspace = true -backoff.workspace = true +backon.workspace = true clap = { version = "4.0", default-features = false, features = ["std", "cargo", "derive"] } edit = "0.1.3" tokio-stream = { version = "0.1.9", features = ["net"] } diff --git a/kube-runtime/Cargo.toml b/kube-runtime/Cargo.toml index 959601641..09975cf5f 100644 --- a/kube-runtime/Cargo.toml +++ b/kube-runtime/Cargo.toml @@ -43,7 +43,7 @@ json-patch.workspace = true jsonptr.workspace = true serde_json.workspace = true thiserror.workspace = true -backoff.workspace = true +backon.workspace = true async-trait.workspace = true hashbrown.workspace = true k8s-openapi.workspace = true diff --git a/kube-runtime/src/controller/mod.rs b/kube-runtime/src/controller/mod.rs index e701b1d1b..8a8f7fc3a 100644 --- a/kube-runtime/src/controller/mod.rs +++ b/kube-runtime/src/controller/mod.rs @@ -8,10 +8,11 @@ use crate::{ ObjectRef, }, scheduler::{debounced_scheduler, ScheduleRequest}, - utils::{trystream_try_via, CancelableJoinHandle, KubeRuntimeStreamExt, StreamBackoff, WatchStreamExt}, + utils::{ + trystream_try_via, Backoff, CancelableJoinHandle, KubeRuntimeStreamExt, StreamBackoff, WatchStreamExt, + }, watcher::{self, metadata_watcher, watcher, DefaultBackoff}, }; -use backoff::backoff::Backoff; use educe::Educe; use futures::{ channel, @@ -915,7 +916,7 @@ where /// The [`default_backoff`](crate::watcher::default_backoff) follows client-go conventions, /// but can be overridden by calling this method. 
#[must_use] - pub fn trigger_backoff(mut self, backoff: impl Backoff + Send + 'static) -> Self { + pub fn trigger_backoff(mut self, backoff: impl Backoff + 'static) -> Self { self.trigger_backoff = Box::new(backoff); self } diff --git a/kube-runtime/src/events.rs b/kube-runtime/src/events.rs index 5b811977b..d62873315 100644 --- a/kube-runtime/src/events.rs +++ b/kube-runtime/src/events.rs @@ -269,7 +269,7 @@ impl Recorder { deprecated_source: None, event_time: Some(MicroTime(now)), regarding: Some(reference.clone()), - note: ev.note.clone().map(Into::into), + note: ev.note.clone(), metadata: ObjectMeta { namespace: reference.namespace.clone(), name: Some(format!( diff --git a/kube-runtime/src/utils/backoff_reset_timer.rs b/kube-runtime/src/utils/backoff_reset_timer.rs index 1c09a5344..e18817c24 100644 --- a/kube-runtime/src/utils/backoff_reset_timer.rs +++ b/kube-runtime/src/utils/backoff_reset_timer.rs @@ -1,36 +1,40 @@ use std::time::{Duration, Instant}; -use backoff::{backoff::Backoff, Clock, SystemClock}; +pub trait Backoff: Iterator + Send + Sync + Unpin { + /// Resets the internal state to the initial value. + fn reset(&mut self); +} + +impl Backoff for Box { + fn reset(&mut self) { + let this: &mut B = self; + this.reset() + } +} /// A [`Backoff`] wrapper that resets after a fixed duration has elapsed. -pub struct ResetTimerBackoff { +pub struct ResetTimerBackoff { backoff: B, - clock: C, last_backoff: Option, reset_duration: Duration, } impl ResetTimerBackoff { pub fn new(backoff: B, reset_duration: Duration) -> Self { - Self::new_with_custom_clock(backoff, reset_duration, SystemClock {}) - } -} - -impl ResetTimerBackoff { - fn new_with_custom_clock(backoff: B, reset_duration: Duration, clock: C) -> Self { Self { backoff, - clock, last_backoff: None, reset_duration, } } } -impl Backoff for ResetTimerBackoff { - fn next_backoff(&mut self) -> Option { +impl Iterator for ResetTimerBackoff { + type Item = Duration; + + fn next(&mut self) -> Option { if let Some(last_backoff) = self.last_backoff { - if self.clock.now() > last_backoff + self.reset_duration { + if tokio::time::Instant::now().into_std() > last_backoff + self.reset_duration { tracing::debug!( ?last_backoff, reset_duration = ?self.reset_duration, @@ -39,48 +43,40 @@ impl Backoff for ResetTimerBackoff { self.backoff.reset(); } } - self.last_backoff = Some(self.clock.now()); - self.backoff.next_backoff() + self.last_backoff = Some(tokio::time::Instant::now().into_std()); + self.backoff.next() } +} +impl Backoff for ResetTimerBackoff { fn reset(&mut self) { - // Do not even bother trying to reset here, since `next_backoff` will take care of this when the timer expires. 
+ self.backoff.reset(); } } #[cfg(test)] mod tests { - use backoff::{backoff::Backoff, Clock}; use tokio::time::advance; use super::ResetTimerBackoff; use crate::utils::stream_backoff::tests::LinearBackoff; - use std::time::{Duration, Instant}; + use std::time::Duration; #[tokio::test] async fn should_reset_when_timer_expires() { tokio::time::pause(); - let mut backoff = ResetTimerBackoff::new_with_custom_clock( + let mut backoff = ResetTimerBackoff::new( LinearBackoff::new(Duration::from_secs(2)), Duration::from_secs(60), - TokioClock, ); - assert_eq!(backoff.next_backoff(), Some(Duration::from_secs(2))); + assert_eq!(backoff.next(), Some(Duration::from_secs(2))); advance(Duration::from_secs(40)).await; - assert_eq!(backoff.next_backoff(), Some(Duration::from_secs(4))); + assert_eq!(backoff.next(), Some(Duration::from_secs(4))); advance(Duration::from_secs(40)).await; - assert_eq!(backoff.next_backoff(), Some(Duration::from_secs(6))); + assert_eq!(backoff.next(), Some(Duration::from_secs(6))); advance(Duration::from_secs(80)).await; - assert_eq!(backoff.next_backoff(), Some(Duration::from_secs(2))); + assert_eq!(backoff.next(), Some(Duration::from_secs(2))); advance(Duration::from_secs(80)).await; - assert_eq!(backoff.next_backoff(), Some(Duration::from_secs(2))); - } - - struct TokioClock; - - impl Clock for TokioClock { - fn now(&self) -> Instant { - tokio::time::Instant::now().into_std() - } + assert_eq!(backoff.next(), Some(Duration::from_secs(2))); } } diff --git a/kube-runtime/src/utils/mod.rs b/kube-runtime/src/utils/mod.rs index 74cc7cf2f..e2722b0fa 100644 --- a/kube-runtime/src/utils/mod.rs +++ b/kube-runtime/src/utils/mod.rs @@ -9,7 +9,7 @@ mod reflect; mod stream_backoff; mod watch_ext; -pub use backoff_reset_timer::ResetTimerBackoff; +pub use backoff_reset_timer::{Backoff, ResetTimerBackoff}; pub use event_decode::EventDecode; pub use event_modify::EventModify; pub use predicate::{predicates, Predicate, PredicateFilter}; diff --git a/kube-runtime/src/utils/stream_backoff.rs b/kube-runtime/src/utils/stream_backoff.rs index 01c6c4292..a23a3461e 100644 --- a/kube-runtime/src/utils/stream_backoff.rs +++ b/kube-runtime/src/utils/stream_backoff.rs @@ -1,10 +1,11 @@ use std::{future::Future, pin::Pin, task::Poll}; -use backoff::backoff::Backoff; use futures::{Stream, TryStream}; use pin_project::pin_project; use tokio::time::{sleep, Instant, Sleep}; +use crate::utils::Backoff; + /// Applies a [`Backoff`] policy to a [`Stream`] /// /// After any [`Err`] is emitted, the stream is paused for [`Backoff::next_backoff`]. 
The @@ -71,7 +72,7 @@ impl Stream for StreamBackoff { let next_item = this.stream.try_poll_next(cx); match &next_item { Poll::Ready(Some(Err(_))) => { - if let Some(backoff_duration) = this.backoff.next_backoff() { + if let Some(backoff_duration) = this.backoff.next() { let backoff_sleep = sleep(backoff_duration); tracing::debug!( deadline = ?backoff_sleep.deadline(), @@ -98,16 +99,54 @@ impl Stream for StreamBackoff { pub(crate) mod tests { use std::{pin::pin, task::Poll, time::Duration}; + use crate::utils::Backoff; + use super::StreamBackoff; - use backoff::backoff::Backoff; + use backon::BackoffBuilder; use futures::{channel::mpsc, poll, stream, StreamExt}; + pub struct ConstantBackoff { + inner: backon::ConstantBackoff, + delay: Duration, + max_times: usize, + } + + impl ConstantBackoff { + pub fn new(delay: Duration, max_times: usize) -> Self { + Self { + inner: backon::ConstantBuilder::default() + .with_delay(delay) + .with_max_times(max_times) + .build(), + delay, + max_times, + } + } + } + + impl Iterator for ConstantBackoff { + type Item = Duration; + + fn next(&mut self) -> Option { + self.inner.next() + } + } + + impl Backoff for ConstantBackoff { + fn reset(&mut self) { + self.inner = backon::ConstantBuilder::default() + .with_delay(self.delay) + .with_max_times(self.max_times) + .build(); + } + } + #[tokio::test] async fn stream_should_back_off() { tokio::time::pause(); let tick = Duration::from_secs(1); let rx = stream::iter([Ok(0), Ok(1), Err(2), Ok(3), Ok(4)]); - let mut rx = pin!(StreamBackoff::new(rx, backoff::backoff::Constant::new(tick))); + let mut rx = pin!(StreamBackoff::new(rx, ConstantBackoff::new(tick, 10))); assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(0)))); assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(1)))); assert_eq!(poll!(rx.next()), Poll::Ready(Some(Err(2)))); @@ -149,16 +188,27 @@ pub(crate) mod tests { #[tokio::test] async fn backoff_should_close_when_requested() { assert_eq!( - StreamBackoff::new( - stream::iter([Ok(0), Ok(1), Err(2), Ok(3)]), - backoff::backoff::Stop {} - ) - .collect::>() - .await, + StreamBackoff::new(stream::iter([Ok(0), Ok(1), Err(2), Ok(3)]), StoppedBackoff {}) + .collect::>() + .await, vec![Ok(0), Ok(1), Err(2)] ); } + struct StoppedBackoff; + + impl Backoff for StoppedBackoff { + fn reset(&mut self) {} + } + + impl Iterator for StoppedBackoff { + type Item = Duration; + + fn next(&mut self) -> Option { + None + } + } + /// Dynamic backoff policy that is still deterministic and testable pub struct LinearBackoff { interval: Duration, @@ -174,12 +224,16 @@ pub(crate) mod tests { } } - impl Backoff for LinearBackoff { - fn next_backoff(&mut self) -> Option { + impl Iterator for LinearBackoff { + type Item = Duration; + + fn next(&mut self) -> Option { self.current_duration += self.interval; Some(self.current_duration) } + } + impl Backoff for LinearBackoff { fn reset(&mut self) { self.current_duration = Duration::ZERO } diff --git a/kube-runtime/src/utils/watch_ext.rs b/kube-runtime/src/utils/watch_ext.rs index 7ed636201..241871837 100644 --- a/kube-runtime/src/utils/watch_ext.rs +++ b/kube-runtime/src/utils/watch_ext.rs @@ -9,10 +9,12 @@ use crate::{ }; use kube_client::Resource; -use crate::{reflector::store::Writer, utils::Reflect}; +use crate::{ + reflector::store::Writer, + utils::{Backoff, Reflect}, +}; use crate::watcher::DefaultBackoff; -use backoff::backoff::Backoff; use futures::{Stream, TryStream}; /// Extension trait for streams returned by [`watcher`](watcher()) or [`reflector`](crate::reflector::reflector) diff 
--git a/kube-runtime/src/watcher.rs b/kube-runtime/src/watcher.rs index 8a649ec17..755320b38 100644 --- a/kube-runtime/src/watcher.rs +++ b/kube-runtime/src/watcher.rs @@ -2,9 +2,10 @@ //! //! See [`watcher`] for the primary entry point. -use crate::utils::ResetTimerBackoff; +use crate::utils::{Backoff, ResetTimerBackoff}; + use async_trait::async_trait; -use backoff::{backoff::Backoff, ExponentialBackoff}; +use backon::BackoffBuilder; use educe::Educe; use futures::{stream::BoxStream, Stream, StreamExt}; use kube_client::{ @@ -882,6 +883,52 @@ pub fn watch_object Self { + Self { + inner: backon::ExponentialBuilder::default() + .with_min_delay(min_delay) + .with_max_delay(max_delay) + .with_factor(factor) + .with_jitter() + .build(), + min_delay, + max_delay, + factor, + enable_jitter, + } + } +} + +impl Backoff for ExponentialBackoff { + fn reset(&mut self) { + let mut builder = backon::ExponentialBuilder::default() + .with_min_delay(self.min_delay) + .with_max_delay(self.max_delay) + .with_factor(self.factor); + if self.enable_jitter { + builder = builder.with_jitter(); + } + self.inner = builder.build(); + } +} + +impl Iterator for ExponentialBackoff { + type Item = Duration; + + fn next(&mut self) -> Option { + self.inner.next() + } +} + /// Default watcher backoff inspired by Kubernetes' client-go. /// /// The parameters currently optimize for being kind to struggling apiservers. @@ -898,24 +945,22 @@ type Strategy = ResetTimerBackoff; impl Default for DefaultBackoff { fn default() -> Self { Self(ResetTimerBackoff::new( - backoff::ExponentialBackoffBuilder::new() - .with_initial_interval(Duration::from_millis(800)) - .with_max_interval(Duration::from_secs(30)) - .with_randomization_factor(1.0) - .with_multiplier(2.0) - .with_max_elapsed_time(None) - .build(), + ExponentialBackoff::new(Duration::from_millis(800), Duration::from_secs(30), 2.0, true), Duration::from_secs(120), )) } } -impl Backoff for DefaultBackoff { - fn next_backoff(&mut self) -> Option { - self.0.next_backoff() +impl Iterator for DefaultBackoff { + type Item = Duration; + + fn next(&mut self) -> Option { + self.0.next() } +} +impl Backoff for DefaultBackoff { fn reset(&mut self) { - self.0.reset() + self.0.reset(); } } From 93a7b05fa8ad38dc721e281990b949b95d69fd00 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 14:28:26 +0000 Subject: [PATCH 2/9] Update garde requirement from 0.21.0 to 0.22.0 (#1679) Updates the requirements on [garde](https://github.com/jprochazk/garde) to permit the latest version. - [Release notes](https://github.com/jprochazk/garde/releases) - [Commits](https://github.com/jprochazk/garde/compare/v0.21.0...v0.22.0) --- updated-dependencies: - dependency-name: garde dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b5bbe2c7c..3abb06521 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -24,7 +24,7 @@ latest = ["k8s-openapi/latest"] [dev-dependencies] tokio-util.workspace = true assert-json-diff.workspace = true -garde = { version = "0.21.0", default-features = false, features = ["derive"] } +garde = { version = "0.22.0", default-features = false, features = ["derive"] } anyhow.workspace = true futures = { workspace = true, features = ["async-await"] } jsonpath-rust.workspace = true From 8188d395a7381b987ff137b2787c3f3abd270e7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 14:29:41 +0000 Subject: [PATCH 3/9] Bump clechasseur/rs-clippy-check from 3 to 4 (#1677) Bumps [clechasseur/rs-clippy-check](https://github.com/clechasseur/rs-clippy-check) from 3 to 4. - [Release notes](https://github.com/clechasseur/rs-clippy-check/releases) - [Commits](https://github.com/clechasseur/rs-clippy-check/compare/v3...v4) --- updated-dependencies: - dependency-name: clechasseur/rs-clippy-check dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Eirik A --- .github/workflows/clippy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index 493b22fc4..a11059080 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -14,6 +14,6 @@ jobs: - uses: dtolnay/rust-toolchain@nightly with: components: clippy - - uses: clechasseur/rs-clippy-check@v3 + - uses: clechasseur/rs-clippy-check@v4 with: args: --workspace From 5b589ceebf8a684dcb1dc09f515a69a7c9cb7770 Mon Sep 17 00:00:00 2001 From: Eirik A Date: Wed, 5 Feb 2025 15:02:00 +0000 Subject: [PATCH 4/9] Fix dumbass pendantic clippy. (#1685) wah, semicolons. Signed-off-by: clux --- kube-runtime/src/controller/runner.rs | 4 ++-- kube-runtime/src/events.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kube-runtime/src/controller/runner.rs b/kube-runtime/src/controller/runner.rs index ddc87fbc6..697460763 100644 --- a/kube-runtime/src/controller/runner.rs +++ b/kube-runtime/src/controller/runner.rs @@ -121,8 +121,8 @@ where Poll::Pending | Poll::Ready(None) => break Poll::Pending, // The above future never returns Poll::Ready(Some(_)). _ => unreachable!(), - }; - }; + } + } // Try to take a new message that isn't already being processed // leave the already-processing ones in the queue, so that we can take them once diff --git a/kube-runtime/src/events.rs b/kube-runtime/src/events.rs index d62873315..19c683dbf 100644 --- a/kube-runtime/src/events.rs +++ b/kube-runtime/src/events.rs @@ -345,7 +345,7 @@ impl Recorder { .await?; } else { events.create(&PostParams::default(), &event).await?; - }; + } { let mut cache = self.cache.write().await; From 0bcc62572d29471f73a1ecd1abf16060e2f138d6 Mon Sep 17 00:00:00 2001 From: Eirik A Date: Wed, 5 Feb 2025 20:39:23 +0000 Subject: [PATCH 5/9] Bump `rand` to 0.9 (#1686) * Bump rand to 0.9 and update renamed fn Signed-off-by: clux * fmt Signed-off-by: clux * deny exclude dupes: man that's a lot of deps for rand.. 
Signed-off-by: clux * some more breaking changes Signed-off-by: clux --------- Signed-off-by: clux --- Cargo.toml | 2 +- deny.toml | 14 ++++++++++++++ kube-runtime/src/reflector/mod.rs | 6 +++--- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a6633ac7f..b8c1af8c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,7 +68,7 @@ pem = "3.0.1" pin-project = "1.0.4" proc-macro2 = "1.0.29" quote = "1.0.10" -rand = "0.8.3" +rand = "0.9.0" rustls = { version = "0.23.16", default-features = false } rustls-pemfile = "2.0.0" schemars = "0.8.6" diff --git a/deny.toml b/deny.toml index 2f1704961..f2d2dfe0c 100644 --- a/deny.toml +++ b/deny.toml @@ -97,3 +97,17 @@ name = "thiserror-impl" name = "security-framework" [[bans.skip]] name = "core-foundation" + +# currently tungstenite hasn't upgraded rand to 0.9 yet, all these are related +[[bans.skip]] +name = "rand" +[[bans.skip]] +name = "rand_core" +[[bans.skip]] +name = "rand_chacha" +[[bans.skip]] +name = "getrandom" +[[bans.skip]] +name = "wasi" +[[bans.skip]] +name = "zerocopy" diff --git a/kube-runtime/src/reflector/mod.rs b/kube-runtime/src/reflector/mod.rs index 068f83a9a..88f4f2910 100644 --- a/kube-runtime/src/reflector/mod.rs +++ b/kube-runtime/src/reflector/mod.rs @@ -141,7 +141,7 @@ mod tests { use futures::{stream, StreamExt, TryStreamExt}; use k8s_openapi::{api::core::v1::ConfigMap, apimachinery::pkg::apis::meta::v1::ObjectMeta}; use rand::{ - distributions::{Bernoulli, Uniform}, + distr::{Bernoulli, Uniform}, Rng, }; use std::collections::{BTreeMap, HashMap}; @@ -256,8 +256,8 @@ mod tests { #[tokio::test] async fn reflector_store_should_not_contain_duplicates() { - let mut rng = rand::thread_rng(); - let item_dist = Uniform::new(0_u8, 100); + let mut rng = rand::rng(); + let item_dist = Uniform::new(0_u8, 100).unwrap(); let deleted_dist = Bernoulli::new(0.40).unwrap(); let store_w = store::Writer::default(); let store = store_w.as_reader(); From 267c2249ffe5e92b8eb5c035b975ec6fc704bba8 Mon Sep 17 00:00:00 2001 From: tottoto Date: Sat, 8 Feb 2025 17:19:49 +0900 Subject: [PATCH 6/9] use rustls-pki-types pem api (#1690) Signed-off-by: tottoto --- Cargo.toml | 1 - kube-client/Cargo.toml | 3 +-- kube-client/src/client/tls.rs | 19 ++++++++----------- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b8c1af8c6..62a488ca7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,7 +70,6 @@ proc-macro2 = "1.0.29" quote = "1.0.10" rand = "0.9.0" rustls = { version = "0.23.16", default-features = false } -rustls-pemfile = "2.0.0" schemars = "0.8.6" secrecy = "0.10.2" serde = "1.0.130" diff --git a/kube-client/Cargo.toml b/kube-client/Cargo.toml index 30de09717..496a69d77 100644 --- a/kube-client/Cargo.toml +++ b/kube-client/Cargo.toml @@ -13,7 +13,7 @@ categories = ["web-programming::http-client", "network-programming", "api-bindin [features] default = ["client"] -rustls-tls = ["rustls", "rustls-pemfile", "hyper-rustls", "hyper-http-proxy?/rustls-tls-native-roots"] +rustls-tls = ["rustls", "hyper-rustls", "hyper-http-proxy?/rustls-tls-native-roots"] webpki-roots = ["hyper-rustls/webpki-roots"] aws-lc-rs = ["rustls?/aws-lc-rs"] openssl-tls = ["openssl", "hyper-openssl"] @@ -57,7 +57,6 @@ futures = { workspace = true, optional = true, features = ["std"] } pem = { workspace = true, optional = true } openssl = { workspace = true, optional = true } rustls = { workspace = true, optional = true } -rustls-pemfile = { workspace = true, optional = true } bytes = { workspace = true, 
optional = true } tokio = { workspace = true, features = ["time", "signal", "sync"], optional = true } kube-core = { path = "../kube-core", version = "=0.98.0" } diff --git a/kube-client/src/client/tls.rs b/kube-client/src/client/tls.rs index 25bdb737e..c264f5a95 100644 --- a/kube-client/src/client/tls.rs +++ b/kube-client/src/client/tls.rs @@ -14,7 +14,7 @@ pub mod rustls_tls { pub enum Error { /// Identity PEM is invalid #[error("identity PEM is invalid: {0}")] - InvalidIdentityPem(#[source] std::io::Error), + InvalidIdentityPem(#[source] rustls::pki_types::pem::Error), /// Identity PEM is missing a private key: the key must be PKCS8 or RSA/PKCS1 #[error("identity PEM is missing a private key: the key must be PKCS8 or RSA/PKCS1")] @@ -96,22 +96,19 @@ pub mod rustls_tls { } fn client_auth(data: &[u8]) -> Result<(Vec>, PrivateKeyDer<'static>), Error> { - use rustls_pemfile::Item; + use rustls::pki_types::pem::{self, SectionKind}; let mut cert_chain = Vec::new(); let mut pkcs8_key = None; let mut pkcs1_key = None; let mut sec1_key = None; let mut reader = std::io::Cursor::new(data); - for item in rustls_pemfile::read_all(&mut reader) - .collect::, _>>() - .map_err(Error::InvalidIdentityPem)? - { - match item { - Item::X509Certificate(cert) => cert_chain.push(cert), - Item::Pkcs8Key(key) => pkcs8_key = Some(PrivateKeyDer::Pkcs8(key)), - Item::Pkcs1Key(key) => pkcs1_key = Some(PrivateKeyDer::from(key)), - Item::Sec1Key(key) => sec1_key = Some(PrivateKeyDer::from(key)), + while let Some((kind, der)) = pem::from_buf(&mut reader).map_err(Error::InvalidIdentityPem)? { + match kind { + SectionKind::Certificate => cert_chain.push(der.into()), + SectionKind::PrivateKey => pkcs8_key = Some(PrivateKeyDer::Pkcs8(der.into())), + SectionKind::RsaPrivateKey => pkcs1_key = Some(PrivateKeyDer::Pkcs1(der.into())), + SectionKind::EcPrivateKey => sec1_key = Some(PrivateKeyDer::Sec1(der.into())), _ => return Err(Error::UnknownPrivateKeyFormat), } } From 07b7891ef8d374de020717d7773106f83f5f7282 Mon Sep 17 00:00:00 2001 From: Eirik A Date: Mon, 10 Feb 2025 09:03:26 +0000 Subject: [PATCH 7/9] Remove `rand` dependency in favor of `tungstenite` fn (#1691) Get rid of rand dependency in favor of tungstenite builtin fn Found https://docs.rs/tungstenite/latest/tungstenite/handshake/client/fn.generate_key.html which means we can let upstream tungstenite be in charge of bumping rand. 
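For reference, a minimal sketch of the replacement call (assuming a `tokio-tungstenite` dependency); it yields the same kind of base64-encoded 16-byte nonce that the removed `sec_websocket_key` helper built with `rand`:

```rust
// Sketch only: let tungstenite (re-exported by tokio-tungstenite) generate the
// Sec-WebSocket-Key nonce instead of a local rand-based helper.
fn main() {
    let key = tokio_tungstenite::tungstenite::handshake::client::generate_key();
    println!("Sec-WebSocket-Key: {key}");
}
```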
Signed-off-by: clux --- kube-client/Cargo.toml | 3 +-- kube-client/src/client/mod.rs | 2 +- kube-client/src/client/upgrade.rs | 8 -------- 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/kube-client/Cargo.toml b/kube-client/Cargo.toml index 496a69d77..d28bf670c 100644 --- a/kube-client/Cargo.toml +++ b/kube-client/Cargo.toml @@ -17,7 +17,7 @@ rustls-tls = ["rustls", "hyper-rustls", "hyper-http-proxy?/rustls-tls-native-roo webpki-roots = ["hyper-rustls/webpki-roots"] aws-lc-rs = ["rustls?/aws-lc-rs"] openssl-tls = ["openssl", "hyper-openssl"] -ws = ["client", "tokio-tungstenite", "rand", "kube-core/ws", "tokio/macros"] +ws = ["client", "tokio-tungstenite", "kube-core/ws", "tokio/macros"] kubelet-debug = ["ws", "kube-core/kubelet-debug"] oauth = ["client", "tame-oauth"] oidc = ["client", "form_urlencoded"] @@ -72,7 +72,6 @@ tower = { workspace = true, features = ["buffer", "filter", "util"], optional = tower-http = { workspace = true, features = ["auth", "map-response-body", "trace"], optional = true } hyper-timeout = { workspace = true, optional = true } tame-oauth = { workspace = true, features = ["gcp"], optional = true } -rand = { workspace = true, optional = true } secrecy = { workspace = true } tracing = { workspace = true, features = ["log"], optional = true } hyper-openssl = { workspace = true, features = ["client-legacy"], optional = true } diff --git a/kube-client/src/client/mod.rs b/kube-client/src/client/mod.rs index cd6c9ac9e..9f6dd0a4c 100644 --- a/kube-client/src/client/mod.rs +++ b/kube-client/src/client/mod.rs @@ -206,7 +206,7 @@ impl Client { http::header::SEC_WEBSOCKET_VERSION, HeaderValue::from_static("13"), ); - let key = upgrade::sec_websocket_key(); + let key = tokio_tungstenite::tungstenite::handshake::client::generate_key(); parts.headers.insert( http::header::SEC_WEBSOCKET_KEY, key.parse().expect("valid header value"), diff --git a/kube-client/src/client/upgrade.rs b/kube-client/src/client/upgrade.rs index 3bfb4f49b..0e8d7d528 100644 --- a/kube-client/src/client/upgrade.rs +++ b/kube-client/src/client/upgrade.rs @@ -86,11 +86,3 @@ pub fn verify_response(res: &Response, key: &str) -> Result<(), UpgradeCon Ok(()) } - -/// Generate a random key for the `Sec-WebSocket-Key` header. -/// This must be nonce consisting of a randomly selected 16-byte value in base64. -pub fn sec_websocket_key() -> String { - use base64::Engine; - let r: [u8; 16] = rand::random(); - base64::engine::general_purpose::STANDARD.encode(r) -} From c191439f65d0f6e717518615455a6f2641453111 Mon Sep 17 00:00:00 2001 From: Techassi Date: Wed, 19 Feb 2025 16:00:04 +0100 Subject: [PATCH 8/9] feat: Add typed scale argument to derive macro (#1656) * feat: Add typed scale argument to derive macro This allows cutomizing the scale subresource by providing key-value items instead of a raw JSON string. For backwards-compatibility, it is still supported to provide a JSON string. However, all examples and tests were converted to the new format. 
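As an illustration (hypothetical `Foo` types mirroring the converted examples in this patch; assumes kube with the `derive` feature, plus serde and schemars), the same scale subresource expressed in the old JSON-string form and the new typed form:

```rust
use kube::{CustomResource, CustomResourceExt};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

// Old form, still accepted for backwards-compatibility:
//   #[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)]
// New typed form added by this patch:
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
#[kube(status = "FooStatus")] // scale only makes sense together with a status subresource
#[kube(scale(
    spec_replicas_path = ".spec.replicas",
    status_replicas_path = ".status.replicas"
))]
pub struct FooSpec {
    replicas: i32,
}

#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
pub struct FooStatus {
    replicas: i32,
}

fn main() {
    // The derive now emits a typed CustomResourceSubresourceScale in the CRD.
    let crd = Foo::crd();
    let scale = crd.spec.versions[0]
        .subresources
        .as_ref()
        .and_then(|sub| sub.scale.as_ref())
        .expect("scale subresource should be present");
    assert_eq!(scale.spec_replicas_path, ".spec.replicas");
    assert_eq!(scale.status_replicas_path, ".status.replicas");
}
```

Both forms end up as the same `CustomResourceSubresourceScale` in the generated CRD; only the attribute syntax differs.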
Signed-off-by: Techassi * refactor: Remove k8s_openapi dependency Signed-off-by: Techassi * chore: Adjust doc comment, fix clippy lint Signed-off-by: Techassi * chore: Fix clippy lint in kube-runtime Signed-off-by: Techassi * docs: Adjust doc comment Signed-off-by: Techassi * chore: Use serde derive feature to enable derive macro Signed-off-by: Techassi * chore: Use ignore instead of no_run Signed-off-by: Techassi * test: Add schema test, fix FromMeta implementation Adding this test proved to be very valuable because the FromMeta implemenetation had a few errors and resulted in different panic messages coming from the derive macro. I also added a small note to the #[kube(scale(...))] section stating that the scale subresource can only be used when the status subresource is used as well. I plan to further improve the validation in a future pull request. Signed-off-by: Techassi --------- Signed-off-by: Techassi --- examples/crd_api.rs | 5 +- examples/crd_derive.rs | 5 +- kube-derive/Cargo.toml | 1 + kube-derive/src/custom_resource.rs | 138 +++++++++++++++++++++++++-- kube-derive/src/lib.rs | 16 +++- kube-derive/tests/crd_schema_test.rs | 41 +++++++- kube/src/lib.rs | 5 +- 7 files changed, 199 insertions(+), 12 deletions(-) diff --git a/examples/crd_api.rs b/examples/crd_api.rs index dfbd52e7e..d0123b28b 100644 --- a/examples/crd_api.rs +++ b/examples/crd_api.rs @@ -19,7 +19,10 @@ use kube::{ #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, Validate, JsonSchema)] #[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)] #[kube(status = "FooStatus")] -#[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)] +#[kube(scale( + spec_replicas_path = ".spec.replicas", + status_replicas_path = ".status.replicas" +))] #[kube(printcolumn = r#"{"name":"Team", "jsonPath": ".spec.metadata.team", "type": "string"}"#)] pub struct FooSpec { #[schemars(length(min = 3))] diff --git a/examples/crd_derive.rs b/examples/crd_derive.rs index 4aace0193..0ec836aeb 100644 --- a/examples/crd_derive.rs +++ b/examples/crd_derive.rs @@ -22,7 +22,10 @@ use serde::{Deserialize, Serialize}; derive = "PartialEq", derive = "Default", shortname = "f", - scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#, + scale( + spec_replicas_path = ".spec.replicas", + status_replicas_path = ".status.replicas" + ), printcolumn = r#"{"name":"Spec", "type":"string", "description":"name of foo", "jsonPath":".spec.name"}"#, selectable = "spec.name" )] diff --git a/kube-derive/Cargo.toml b/kube-derive/Cargo.toml index 01320eca3..a1821f640 100644 --- a/kube-derive/Cargo.toml +++ b/kube-derive/Cargo.toml @@ -18,6 +18,7 @@ workspace = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = ["extra-traits"] } +serde = { workspace = true, features = ["derive"] } serde_json.workspace = true darling.workspace = true diff --git a/kube-derive/src/custom_resource.rs b/kube-derive/src/custom_resource.rs index e12d559a6..8c760864e 100644 --- a/kube-derive/src/custom_resource.rs +++ b/kube-derive/src/custom_resource.rs @@ -3,6 +3,7 @@ use darling::{FromDeriveInput, FromMeta}; use proc_macro2::{Ident, Literal, Span, TokenStream}; use quote::{ToTokens, TokenStreamExt as _}; +use serde::Deserialize; use syn::{parse_quote, Data, DeriveInput, Expr, Path, Visibility}; /// Values we can parse from #[kube(attrs)] @@ -33,7 +34,12 @@ struct KubeAttrs { printcolums: Vec, #[darling(multiple)] selectable: Vec, - scale: 
Option, + + /// Customize the scale subresource, see [Kubernetes docs][1]. + /// + /// [1]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource + scale: Option, + #[darling(default)] crates: Crates, #[darling(multiple, rename = "annotation")] @@ -192,6 +198,122 @@ impl FromMeta for SchemaMode { } } +/// This struct mirrors the fields of `k8s_openapi::CustomResourceSubresourceScale` to support +/// parsing from the `#[kube]` attribute. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +struct Scale { + pub(crate) label_selector_path: Option, + pub(crate) spec_replicas_path: String, + pub(crate) status_replicas_path: String, +} + +// This custom FromMeta implementation is needed for two reasons: +// +// - To enable backwards-compatibility. Up to version 0.97.0 it was only possible to set scale +// subresource values as a JSON string. +// - To be able to declare the scale sub-resource as a list of typed fields. The from_list impl uses +// the derived implementation as inspiration. +impl FromMeta for Scale { + /// This is implemented for backwards-compatibility. It allows that the scale subresource can + /// be deserialized from a JSON string. + fn from_string(value: &str) -> darling::Result { + serde_json::from_str(value).map_err(darling::Error::custom) + } + + fn from_list(items: &[darling::ast::NestedMeta]) -> darling::Result { + let mut errors = darling::Error::accumulator(); + + let mut label_selector_path: (bool, Option>) = (false, None); + let mut spec_replicas_path: (bool, Option) = (false, None); + let mut status_replicas_path: (bool, Option) = (false, None); + + for item in items { + match item { + darling::ast::NestedMeta::Meta(meta) => { + let name = darling::util::path_to_string(meta.path()); + + match name.as_str() { + "label_selector_path" => { + if !label_selector_path.0 { + let path = errors.handle(darling::FromMeta::from_meta(meta)); + label_selector_path = (true, Some(path)) + } else { + errors.push( + darling::Error::duplicate_field("label_selector_path").with_span(&meta), + ); + } + } + "spec_replicas_path" => { + if !spec_replicas_path.0 { + let path = errors.handle(darling::FromMeta::from_meta(meta)); + spec_replicas_path = (true, path) + } else { + errors.push( + darling::Error::duplicate_field("spec_replicas_path").with_span(&meta), + ); + } + } + "status_replicas_path" => { + if !status_replicas_path.0 { + let path = errors.handle(darling::FromMeta::from_meta(meta)); + status_replicas_path = (true, path) + } else { + errors.push( + darling::Error::duplicate_field("status_replicas_path").with_span(&meta), + ); + } + } + other => errors.push(darling::Error::unknown_field(other)), + } + } + darling::ast::NestedMeta::Lit(lit) => { + errors.push(darling::Error::unsupported_format("literal").with_span(&lit.span())) + } + } + } + + if !spec_replicas_path.0 && spec_replicas_path.1.is_none() { + errors.push(darling::Error::missing_field("spec_replicas_path")); + } + + if !status_replicas_path.0 && status_replicas_path.1.is_none() { + errors.push(darling::Error::missing_field("status_replicas_path")); + } + + errors.finish()?; + + Ok(Self { + label_selector_path: label_selector_path.1.unwrap_or_default(), + spec_replicas_path: spec_replicas_path.1.unwrap(), + status_replicas_path: status_replicas_path.1.unwrap(), + }) + } +} + +impl Scale { + fn to_tokens(&self, k8s_openapi: &Path) -> TokenStream { + let apiext = quote! 
{ + #k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1 + }; + + let label_selector_path = self + .label_selector_path + .as_ref() + .map_or_else(|| quote! { None }, |p| quote! { Some(#p.into()) }); + let spec_replicas_path = &self.spec_replicas_path; + let status_replicas_path = &self.status_replicas_path; + + quote! { + #apiext::CustomResourceSubresourceScale { + label_selector_path: #label_selector_path, + spec_replicas_path: #spec_replicas_path.into(), + status_replicas_path: #status_replicas_path.into() + } + } + } +} + pub(crate) fn derive(input: proc_macro2::TokenStream) -> proc_macro2::TokenStream { let derive_input: DeriveInput = match syn::parse2(input) { Err(err) => return err.to_compile_error(), @@ -452,7 +574,13 @@ pub(crate) fn derive(input: proc_macro2::TokenStream) -> proc_macro2::TokenStrea .map(|s| format!(r#"{{ "jsonPath": "{s}" }}"#)) .collect(); let fields = format!("[ {} ]", fields.join(",")); - let scale_code = if let Some(s) = scale { s } else { "".to_string() }; + let scale = scale.map_or_else( + || quote! { None }, + |s| { + let scale = s.to_tokens(&k8s_openapi); + quote! { Some(#scale) } + }, + ); // Ensure it generates for the correct CRD version (only v1 supported now) let apiext = quote! { @@ -564,11 +692,7 @@ pub(crate) fn derive(input: proc_macro2::TokenStream) -> proc_macro2::TokenStrea #k8s_openapi::k8s_if_ge_1_30! { let fields : Vec<#apiext::SelectableField> = #serde_json::from_str(#fields).expect("valid selectableField column json"); } - let scale: Option<#apiext::CustomResourceSubresourceScale> = if #scale_code.is_empty() { - None - } else { - #serde_json::from_str(#scale_code).expect("valid scale subresource json") - }; + let scale: Option<#apiext::CustomResourceSubresourceScale> = #scale; let categories: Vec = #serde_json::from_str(#categories_json).expect("valid categories"); let shorts : Vec = #serde_json::from_str(#short_json).expect("valid shortnames"); let subres = if #has_status { diff --git a/kube-derive/src/lib.rs b/kube-derive/src/lib.rs index 83e008caa..f0049282c 100644 --- a/kube-derive/src/lib.rs +++ b/kube-derive/src/lib.rs @@ -129,8 +129,22 @@ mod resource; /// NOTE: `CustomResourceDefinition`s require a schema. If `schema = "disabled"` then /// `Self::crd()` will not be installable into the cluster as-is. /// -/// ## `#[kube(scale = r#"json"#)]` +/// ## `#[kube(scale(...))]` +/// /// Allow customizing the scale struct for the [scale subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#subresources). +/// It should be noted, that the status subresource must also be enabled to use the scale subresource. This is because +/// the `statusReplicasPath` only accepts JSONPaths under `.status`. +/// +/// ```ignore +/// #[kube(scale( +/// specReplicasPath = ".spec.replicas", +/// statusReplicaPath = ".status.replicas", +/// labelSelectorPath = ".spec.labelSelector" +/// ))] +/// ``` +/// +/// The deprecated way of customizing the scale subresource using a raw JSON string is still +/// support for backwards-compatibility. /// /// ## `#[kube(printcolumn = r#"json"#)]` /// Allows adding straight json to [printcolumns](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#additional-printer-columns). 
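A short illustrative sketch of the `#[kube(scale(...))]` usage documented above (hypothetical `Doc`/`DocStatus` types; assumes kube with the `derive` feature, serde, schemars and serde_json), using the snake_case keys matched by the `FromMeta` implementation and exercised by the schema test below:

```rust
use kube::{CustomResource, CustomResourceExt};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "example.dev", version = "v1", kind = "Doc", namespaced)]
#[kube(status = "DocStatus")] // required: statusReplicasPath must point under .status
#[kube(scale(
    spec_replicas_path = ".spec.replicas",
    status_replicas_path = ".status.replicas",
    label_selector_path = ".status.labelSelector"
))]
pub struct DocSpec {
    replicas: i32,
}

#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct DocStatus {
    replicas: i32,
    label_selector: String,
}

fn main() {
    // Prints the status and scale subresource config for the single v1 version of the CRD.
    let crd = Doc::crd();
    let subresources = &crd.spec.versions[0].subresources;
    println!("{}", serde_json::to_string_pretty(subresources).unwrap());
}
```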
diff --git a/kube-derive/tests/crd_schema_test.rs b/kube-derive/tests/crd_schema_test.rs index 8e8c5cf07..e7c65bfce 100644 --- a/kube-derive/tests/crd_schema_test.rs +++ b/kube-derive/tests/crd_schema_test.rs @@ -1,3 +1,4 @@ +#![allow(missing_docs)] #![recursion_limit = "256"] use assert_json_diff::assert_json_eq; @@ -29,6 +30,12 @@ use std::collections::{HashMap, HashSet}; label("clux.dev", "cluxingv1"), label("clux.dev/persistence", "disabled"), rule = Rule::new("self.metadata.name == 'singleton'"), + status = "Status", + scale( + spec_replicas_path = ".spec.replicas", + status_replicas_path = ".status.replicas", + label_selector_path = ".status.labelSelector" + ), )] #[cel_validate(rule = Rule::new("has(self.nonNullable)"))] #[serde(rename_all = "camelCase")] @@ -62,6 +69,13 @@ struct FooSpec { set: HashSet, } +#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub struct Status { + replicas: usize, + label_selector: String, +} + fn default_value() -> String { "default_value".into() } @@ -231,6 +245,14 @@ fn test_crd_schema_matches_expected() { }, { "jsonPath": ".spec.nullable" }], + "subresources": { + "status": {}, + "scale": { + "specReplicasPath": ".spec.replicas", + "labelSelectorPath": ".status.labelSelector", + "statusReplicasPath": ".status.replicas" + } + }, "schema": { "openAPIV3Schema": { "description": "Custom resource representing a Foo", @@ -358,6 +380,24 @@ fn test_crd_schema_matches_expected() { "rule": "has(self.nonNullable)", }], "type": "object" + }, + "status": { + "properties": { + "replicas": { + "type": "integer", + "format": "uint", + "minimum": 0.0, + }, + "labelSelector": { + "type": "string" + } + }, + "required": [ + "labelSelector", + "replicas" + ], + "nullable": true, + "type": "object" } }, "required": [ @@ -370,7 +410,6 @@ fn test_crd_schema_matches_expected() { "type": "object" } }, - "subresources": {}, } ] } diff --git a/kube/src/lib.rs b/kube/src/lib.rs index 1cb9f23c4..615f27353 100644 --- a/kube/src/lib.rs +++ b/kube/src/lib.rs @@ -227,7 +227,10 @@ mod test { #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] #[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)] #[kube(status = "FooStatus")] - #[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)] + #[kube(scale( + spec_replicas_path = ".spec.replicas", + status_replicas_path = ".status.replicas" + ))] #[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure pub struct FooSpec { name: String, From 8876639ae5624b8ac0632d34b7b1f0fa06ca2477 Mon Sep 17 00:00:00 2001 From: Techassi Date: Wed, 19 Feb 2025 16:17:20 +0100 Subject: [PATCH 9/9] feat: Add deprecated argument to derive macro (#1697) * feat: Add deprecated argument to derive macro Signed-off-by: Techassi * docs: Simplify deprecated argument doc comments Signed-off-by: Techassi * chore: Apply suggestion Co-authored-by: Eirik A Signed-off-by: Techassi --------- Signed-off-by: Techassi Co-authored-by: Eirik A --- kube-derive/src/custom_resource.rs | 21 ++++++++++++++++++++- kube-derive/src/lib.rs | 13 +++++++++++++ kube-derive/tests/crd_schema_test.rs | 3 +++ 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/kube-derive/src/custom_resource.rs b/kube-derive/src/custom_resource.rs index 8c760864e..2d2bb37d9 100644 --- a/kube-derive/src/custom_resource.rs +++ b/kube-derive/src/custom_resource.rs @@ -1,6 +1,6 @@ // Generated by darling macros, out of our control 
#![allow(clippy::manual_unwrap_or_default)] -use darling::{FromDeriveInput, FromMeta}; +use darling::{util::Override, FromDeriveInput, FromMeta}; use proc_macro2::{Ident, Literal, Span, TokenStream}; use quote::{ToTokens, TokenStreamExt as _}; use serde::Deserialize; @@ -60,6 +60,11 @@ struct KubeAttrs { /// Defaults to `true`. #[darling(default = default_served_arg)] served: bool, + + /// Sets the `deprecated` and optionally the `deprecationWarning` property. + /// + /// See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-deprecation + deprecated: Option>, } #[derive(Debug)] @@ -356,6 +361,7 @@ pub(crate) fn derive(input: proc_macro2::TokenStream) -> proc_macro2::TokenStrea rules, storage, served, + deprecated, crates: Crates { kube_core, @@ -640,6 +646,18 @@ pub(crate) fn derive(input: proc_macro2::TokenStream) -> proc_macro2::TokenStrea quote! {} }; + let deprecation = if let Some(deprecation) = deprecated { + match deprecation { + Override::Inherit => quote! { "deprecated": true, }, + Override::Explicit(warning) => quote! { + "deprecated": true, + "deprecationWarning": #warning, + }, + } + } else { + quote! {} + }; + // Known constraints that are hard to enforce elsewhere let compile_constraints = if !selectable.is_empty() { quote! { @@ -672,6 +690,7 @@ pub(crate) fn derive(input: proc_macro2::TokenStream) -> proc_macro2::TokenStrea "name": #version, "served": #served, "storage": #storage, + #deprecation "schema": { "openAPIV3Schema": schema, }, diff --git a/kube-derive/src/lib.rs b/kube-derive/src/lib.rs index f0049282c..1d5adc53b 100644 --- a/kube-derive/src/lib.rs +++ b/kube-derive/src/lib.rs @@ -175,6 +175,19 @@ mod resource; /// ## `#[kube(served = true)]` /// Sets the `served` property to `true` or `false`. /// +/// ## `#[kube(deprecated [= "warning"])]` +/// Sets the `deprecated` property to `true`. +/// +/// ```ignore +/// #[kube(deprecated)] +/// ``` +/// +/// Aditionally, you can provide a `deprecationWarning` using the following example. +/// +/// ```ignore +/// #[kube(deprecated = "Replaced by other CRD")] +/// ``` +/// /// ## `#[kube(rule = Rule::new("self == oldSelf").message("field is immutable"))]` /// Inject a top level CEL validation rule for the top level generated struct. /// This attribute is for resources deriving [`CELSchema`] instead of [`schemars::JsonSchema`]. diff --git a/kube-derive/tests/crd_schema_test.rs b/kube-derive/tests/crd_schema_test.rs index e7c65bfce..b75399bab 100644 --- a/kube-derive/tests/crd_schema_test.rs +++ b/kube-derive/tests/crd_schema_test.rs @@ -23,6 +23,7 @@ use std::collections::{HashMap, HashSet}; shortname = "f", served = false, storage = false, + deprecated = "my warning", selectable = ".spec.nonNullable", selectable = ".spec.nullable", annotation("clux.dev", "cluxingv1"), @@ -239,6 +240,8 @@ fn test_crd_schema_matches_expected() { "name": "v1", "served": false, "storage": false, + "deprecated": true, + "deprecationWarning": "my warning", "additionalPrinterColumns": [], "selectableFields": [{ "jsonPath": ".spec.nonNullable"