From 384d37369e98b409db270bb868bbb04476a78c2b Mon Sep 17 00:00:00 2001 From: Niel Drummond Date: Mon, 15 Jan 2024 08:58:15 +0000 Subject: [PATCH 1/2] chore(deps): Update to hyper 1.1 --- .circleci/config.yml | 2 +- Cargo.toml | 39 ++-- codegen/proto/Cargo.toml | 6 +- codegen/proto/src/lib.rs | 1 + codegen/swagger/Cargo.toml | 4 +- codegen/swagger/README.md | 6 +- codegen/swagger/pom.xml | 6 +- .../src/main/resources/bollard/Cargo.mustache | 2 +- .../main/resources/bollard/models.mustache | 3 +- codegen/swagger/src/models.rs | 3 +- examples/build_buildkit.rs | 2 +- examples/export_oci_image.rs | 4 +- examples/image_from_scratch.rs | 9 +- examples/post_dockerfile.rs | 7 +- src/auth.rs | 1 + src/container.rs | 47 ++--- src/docker.rs | 180 +++++++----------- src/errors.rs | 8 + src/exec.rs | 13 +- src/grpc/driver/docker_container.rs | 47 ++--- src/grpc/driver/moby.rs | 14 +- src/grpc/error.rs | 18 +- src/grpc/export.rs | 36 ++-- src/grpc/io/reader_stream.rs | 1 + src/grpc/mod.rs | 77 +++++--- src/image.rs | 60 +++--- src/lib.rs | 5 - src/named_pipe.rs | 21 +- src/network.rs | 36 ++-- src/read.rs | 128 ++++++++++--- src/secret.rs | 16 +- src/service.rs | 16 +- src/system.rs | 20 +- src/uri.rs | 1 + src/volume.rs | 26 +-- tests/export_test.rs | 10 +- tests/image_test.rs | 29 +-- tests/version_test.rs | 1 + 38 files changed, 476 insertions(+), 429 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e8e8bcb0..1c1b587e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,7 @@ jobs: - run: docker run -h test.example.com --volumes-from certs -d --privileged --name test-docker-daemon docker:stable-dind --storage-driver=overlay --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/cert.pem --tlskey=/certs/key.pem - run: docker run --rm --volumes-from certs --privileged --rm --entrypoint=chmod docker:stable-dind 644 /certs/key.pem /certs/ca-key.pem - run: docker build -t bollard . 
- - run: docker run -ti -e DOCKER_CERT_PATH=/certs -e DOCKER_HOST='tcp://test.example.com:2376' --volumes-from certs --rm --link test-docker-daemon:docker bollard cargo test --features test_ssl,ct_logs -- --test test_version_ssl + - run: docker run -ti -e DOCKER_CERT_PATH=/certs -e DOCKER_HOST='tcp://test.example.com:2376' --volumes-from certs --rm --link test-docker-daemon:docker bollard cargo test --features test_ssl,webpki -- --test test_version_ssl test_http: docker: - image: docker:24.0.5 diff --git a/Cargo.toml b/Cargo.toml index b1077156..65162a66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,8 +19,9 @@ members = [ ] [features] +default = [] # Enable Buildkit-enabled docker image building -buildkit = ["chrono", "num", "rand", "tokio/fs", "tokio-stream", "tokio-util/io", "tower", "tonic", "tower-service", "ssl", "bollard-stubs/buildkit", "bollard-buildkit-proto"] +buildkit = ["chrono", "num", "rand", "tokio/fs", "tokio-util/io", "tonic", "tower-service", "ssl", "bollard-stubs/buildkit", "bollard-buildkit-proto"] # Enable tests specifically for the http connector test_http = [] # Enable tests specifically for rustls @@ -30,63 +31,63 @@ test_macos = [] # Enable JSON payload in deserialization errors json_data_content = [] # Enable rustls / ssl -ssl = ["home", "hyper-rustls", "rustls", "rustls-native-certs", "rustls-pemfile", "webpki", "webpki-roots"] -ct_logs = ["ssl", "ct-logs"] +ssl = ["home", "hyper-rustls", "rustls", "rustls-native-certs", "rustls-pemfile", "rustls-pki-types"] +webpki = ["ssl", "dep:webpki-roots"] chrono = ["dep:chrono", "bollard-stubs/chrono"] time = ["dep:time", "bollard-stubs/time"] [dependencies] base64 = "0.21" -bollard-stubs = { path = "codegen/swagger", version = "=1.43.0-rc.2", default-features = false } -bollard-buildkit-proto = { path = "codegen/proto", version = "=0.2.0", optional = true } +bollard-stubs = { path = "codegen/swagger", version = "=1.43.0-rc.3", default-features = false } +bollard-buildkit-proto = { path = "codegen/proto", version = "=0.2.1", optional = true } bytes = "1" chrono = { version = "0.4", default-features = false, features = ["std", "clock", "serde"], optional = true } -ct-logs = { version = "0.9.0", optional = true } home = { version = "0.5", optional = true } futures-core = "0.3" futures-util = "0.3" hex = "0.4.2" -http = "0.2" -hyper = { version = "0.14", features = ["client", "tcp", "http1", "http2", "stream"] } -hyper-rustls = { version = "0.24", optional = true } +http = "1.0" +hyper = { version = "1", features = ["client", "http1"] } +hyper-rustls = { version = "0.26", optional = true } +hyper-util = { version = "0.1.2", features = ["http1", "client-legacy", "tokio"] } +http-body-util = "0.1.0" log = "0.4" pin-project-lite = "0.2.8" num = { version = "0.4", optional = true } rand = { version = "0.8", optional = true } -rustls = { version = "0.21", optional = true, features = ["dangerous_configuration"] } -rustls-native-certs = { version = "0.6.0", optional = true } -rustls-pemfile = { version = "1.0", optional = true } +rustls = { version = "0.22", optional = true, features = ["ring"] } +rustls-native-certs = { version = "0.7.0", optional = true } +rustls-pemfile = { version = "2.0", optional = true } +rustls-pki-types = { version = "1.1", optional = true } serde = "1.0" serde_derive = "1.0" serde_json = "1.0" serde_repr = "0.1.6" serde_urlencoded = "0.7" -tokio = { version = "1.7", features = ["time", "net"] } +tokio = { version = "1.35", features = ["time", "net", "io-util"] } tonic = { version = "0.10", optional = 
true } -tower = { version = "0.4", optional = true } thiserror = "1.0" time = { version = "0.3", features = ["formatting", "parsing"], optional = true } -tokio-stream = { version = "0.1", optional = true } tokio-util = { version = "0.7", features = ["codec"] } tower-service = { version = "0.3", optional = true } url = "2.2" -webpki-roots = { version = "0.25.2", optional = true } -webpki = { package = "rustls-webpki", version = "0.101.4", optional = true } +webpki-roots = { version = "0.26", optional = true } [dev-dependencies] flate2 = "1.0" tar = "0.4" tokio = { version = "1.7", features = ["fs", "rt-multi-thread", "macros"] } -yup-hyper-mock = "6.0.0" +yup-hyper-mock = { version = "8.0.0" } [target.'cfg(unix)'.dependencies] -hyperlocal = { version = "0.8.0" } +hyperlocal = { git = "https://github.com/softprops/hyperlocal", rev = "34dc8579d74f96b68ddbd55582c76019ae18cfdc", version = "0.9.0-alpha" } [target.'cfg(unix)'.dev-dependencies] termion = "2.0" [target.'cfg(windows)'.dependencies] winapi = { version = "0.3.9", features = ["winerror"] } +tower-service = { version = "0.3" } [package.metadata.docs.rs] features = ["ssl"] diff --git a/codegen/proto/Cargo.toml b/codegen/proto/Cargo.toml index d57c1db9..2895e41c 100644 --- a/codegen/proto/Cargo.toml +++ b/codegen/proto/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "bollard-buildkit-proto" description = "Protobuf definitions to interact with buildkit using Bollard" -version = "0.2.0" +version = "0.2.1" authors = [ "Bollard contributors" ] license = "Apache-2.0" edition = "2021" [features] -build = ["tonic-build", "indexmap"] +build = ["tonic-build"] [[bin]] name = "gen" @@ -18,5 +18,3 @@ tonic = { version = "0.10" } prost = { version = "0.12" } prost-types = "0.12" tonic-build = { version = "0.10", optional = true } -# bug: https://github.com/bluss/indexmap/issues/151#issuecomment-716691744 -indexmap = { version = "2.0.0", features = ["std"], optional = true } diff --git a/codegen/proto/src/lib.rs b/codegen/proto/src/lib.rs index 2f9a0eac..2dc23579 100644 --- a/codegen/proto/src/lib.rs +++ b/codegen/proto/src/lib.rs @@ -36,6 +36,7 @@ pub mod google { pub use prost_types as protobuf; } +#[allow(clippy::all)] pub mod pb { include!("generated/pb.rs"); } diff --git a/codegen/swagger/Cargo.toml b/codegen/swagger/Cargo.toml index 53a4b185..b3a07952 100644 --- a/codegen/swagger/Cargo.toml +++ b/codegen/swagger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bollard-stubs" -version = "1.43.0-rc.2" +version = "1.43.0-rc.3" authors = [ "Bollard contributors" ] description = "Stubs used for the Bollard rust async Docker client API" license = "Apache-2.0" @@ -11,7 +11,7 @@ buildkit = ["base64", "bytes", "bollard-buildkit-proto", "prost"] [dependencies] base64 = { version = "0.21", optional = true } -bollard-buildkit-proto = { path = "../proto", version = "=0.2.0", optional = true } +bollard-buildkit-proto = { path = "../proto", version = "=0.2.1", optional = true } bytes = { version = "1", optional = true } chrono = { version = "0.4", default-features = false, features = ["std", "clock", "serde"], optional = true } serde = { version = "1.0", features = ["derive"] } diff --git a/codegen/swagger/README.md b/codegen/swagger/README.md index c270cfb2..61b7faf8 100644 --- a/codegen/swagger/README.md +++ b/codegen/swagger/README.md @@ -7,9 +7,9 @@ To see how to make this your own, look here: [README](https://github.com/swagger-api/swagger-codegen/blob/master/README.md) -- API version: 1.43.0-rc.2 -- Code generation suffix: 1.43.0-rc.2 -- Build date: 
2023-09-15T16:02:09.609+01:00 +- API version: 1.43.0-rc.3 +- Code generation suffix: 1.43.0-rc.3 +- Build date: 2024-01-15T11:26:03.143Z This autogenerated project defines an API crate `bollard-stubs` which contains: * Data types representing the underlying data model. diff --git a/codegen/swagger/pom.xml b/codegen/swagger/pom.xml index 7a511c32..17ffe2a3 100644 --- a/codegen/swagger/pom.xml +++ b/codegen/swagger/pom.xml @@ -2,7 +2,7 @@ 4.0.0 bollard bollard-codegen - 0.1.4-SNAPSHOT + 0.1.5-SNAPSHOT bollard @@ -27,7 +27,7 @@ models.rs,lib.rs,Cargo.toml,config,README.md bollard-stubs - 1.43.0-rc.2 + 1.43.0-rc.3 @@ -36,7 +36,7 @@ bollard bollard-codegen - 0.1.4-SNAPSHOT + 0.1.5-SNAPSHOT diff --git a/codegen/swagger/src/main/resources/bollard/Cargo.mustache b/codegen/swagger/src/main/resources/bollard/Cargo.mustache index 734fc4d4..a8b16a26 100644 --- a/codegen/swagger/src/main/resources/bollard/Cargo.mustache +++ b/codegen/swagger/src/main/resources/bollard/Cargo.mustache @@ -11,7 +11,7 @@ buildkit = ["base64", "bytes", "bollard-buildkit-proto", "prost"] [dependencies] base64 = { version = "0.21", optional = true } -bollard-buildkit-proto = { path = "../proto", version = "=0.2.0", optional = true } +bollard-buildkit-proto = { path = "../proto", version = "=0.2.1", optional = true } bytes = { version = "1", optional = true } chrono = { version = "0.4", default-features = false, features = ["std", "clock", "serde"], optional = true } serde = { version = "1.0", features = ["derive"] } diff --git a/codegen/swagger/src/main/resources/bollard/models.mustache b/codegen/swagger/src/main/resources/bollard/models.mustache index e629c1d0..051aef65 100644 --- a/codegen/swagger/src/main/resources/bollard/models.mustache +++ b/codegen/swagger/src/main/resources/bollard/models.mustache @@ -80,7 +80,8 @@ fn deserialize_buildinfo_aux<'de, D: Deserializer<'de>>( d: D, ) -> Result { let aux: String = serde::Deserialize::deserialize(d)?; - let raw = base64::decode(&aux).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; + let raw = base64::Engine::decode(&base64::engine::general_purpose::STANDARD, &aux) + .map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; let buf = bytes::BytesMut::from(&raw[..]); let res = crate::moby::buildkit::v1::StatusResponse::decode(buf) diff --git a/codegen/swagger/src/models.rs b/codegen/swagger/src/models.rs index 3f66458b..11f8a330 100644 --- a/codegen/swagger/src/models.rs +++ b/codegen/swagger/src/models.rs @@ -80,7 +80,8 @@ fn deserialize_buildinfo_aux<'de, D: Deserializer<'de>>( d: D, ) -> Result { let aux: String = serde::Deserialize::deserialize(d)?; - let raw = base64::decode(&aux).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; + let raw = base64::Engine::decode(&base64::engine::general_purpose::STANDARD, &aux) + .map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; let buf = bytes::BytesMut::from(&raw[..]); let res = crate::moby::buildkit::v1::StatusResponse::decode(buf) diff --git a/examples/build_buildkit.rs b/examples/build_buildkit.rs index e62a74fa..13ae0f94 100644 --- a/examples/build_buildkit.rs +++ b/examples/build_buildkit.rs @@ -13,7 +13,7 @@ use std::io::Write; #[tokio::main] async fn main() { - let docker = Docker::connect_with_unix_defaults().unwrap(); + let docker = Docker::connect_with_socket_defaults().unwrap(); let dockerfile = String::from( "FROM alpine as builder1 diff --git a/examples/export_oci_image.rs b/examples/export_oci_image.rs index 83451170..3841bd6e 100644 --- a/examples/export_oci_image.rs +++ 
b/examples/export_oci_image.rs @@ -43,7 +43,7 @@ async fn main() { "docker.io/library/bollard-oci-export-buildkit-example:latest", ) .annotation("exporter", "Bollard") - .dest(&std::path::Path::new("/tmp/oci-image.tar")); + .dest(std::path::Path::new("/tmp/oci-image.tar")); let buildkit_builder = DockerContainerBuilder::new("bollard_buildkit_export_oci_image", &docker, session_id); @@ -53,7 +53,7 @@ async fn main() { bollard::grpc::export::ImageExporterLoadInput::Upload(bytes::Bytes::from(compressed)); docker - .image_export_oci(driver, session_id, frontend_opts, output, load_input) + .image_export_oci(driver, session_id, frontend_opts, output, load_input, None) .await .unwrap(); } diff --git a/examples/image_from_scratch.rs b/examples/image_from_scratch.rs index 93488769..bb4a3164 100644 --- a/examples/image_from_scratch.rs +++ b/examples/image_from_scratch.rs @@ -4,9 +4,9 @@ use bollard::models::CreateImageInfo; /// This implementation streams the archive file piece by piece to the Docker daemon, /// but does so inefficiently. For best results, use `tokio::fs` instead of `std::fs`. use bollard::{image::CreateImageOptions, Docker}; +use bytes::Bytes; use futures_util::stream::{Stream, TryStreamExt}; use futures_util::task::{Context, Poll}; -use hyper::body::Body; use std::env::args; use std::fs::File; use std::io::{Read, Result as IOResult}; @@ -68,12 +68,13 @@ async fn main() -> Result<(), Box> { }; // Create FileReader struct let reader = FileStreamer { file, done: false }; - // A `Body` can be created from a `Stream>` - let req_body: Body = Body::wrap_stream(reader); + // A `Bytes` can be created from a `Stream>` + let v = reader.try_concat().await?; + let bytes = Bytes::from(v); // Finally, call Docker::create_image with the options and the body let result: Vec = docker - .create_image(Some(options), Some(req_body), None) + .create_image(Some(options), Some(bytes), None) .try_collect() .await?; // If all went well, the ID of the new image will be printed diff --git a/examples/post_dockerfile.rs b/examples/post_dockerfile.rs index b9f5809b..ef7d5ece 100644 --- a/examples/post_dockerfile.rs +++ b/examples/post_dockerfile.rs @@ -4,8 +4,7 @@ use bollard::image::BuildImageOptions; use bollard::Docker; -use futures_util::stream::StreamExt; -use hyper::body::Body; +use futures_util::{stream::StreamExt, TryStreamExt}; use tokio::fs::File; use tokio_util::codec::{BytesCodec, FramedRead}; @@ -26,9 +25,9 @@ async fn main() { let filename = &args().nth(1).expect("needs first argument"); let archive = File::open(filename).await.expect("could not open file"); let stream = FramedRead::new(archive, BytesCodec::new()); - let body = Body::wrap_stream(stream); + let bytes = stream.try_concat().await.unwrap(); - let mut image_build_stream = docker.build_image(image_options, None, Some(body)); + let mut image_build_stream = docker.build_image(image_options, None, Some(bytes.freeze())); while let Some(msg) = image_build_stream.next().await { println!("Message: {msg:?}"); diff --git a/src/auth.rs b/src/auth.rs index 386060a3..5b6b735a 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -1,6 +1,7 @@ //! Credentials management, for access to the Docker Hub or a custom Registry. 
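
(Illustrative note, not part of the patch: the example changes above drop
`hyper::Body::wrap_stream` because the hyper 1.x API surface in this crate now
takes fixed `bytes::Bytes` payloads. A minimal sketch of the caller-side
pattern, with a helper name of our own choosing:)

    use futures_util::TryStreamExt;
    use tokio_util::codec::{BytesCodec, FramedRead};

    // Buffer a streamed tar archive into one contiguous `Bytes` before
    // handing it to bollard; `BytesCodec` yields `BytesMut` frames and
    // `try_concat` folds them together.
    async fn tarball_bytes(file: tokio::fs::File) -> std::io::Result<bytes::Bytes> {
        let stream = FramedRead::new(file, BytesCodec::new());
        Ok(stream.try_concat().await?.freeze())
    }
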
use base64::{engine::general_purpose::STANDARD, Engine}; +use serde_derive::{Deserialize, Serialize}; #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] #[allow(missing_docs)] diff --git a/src/container.rs b/src/container.rs index fcb4f509..767c7380 100644 --- a/src/container.rs +++ b/src/container.rs @@ -4,8 +4,10 @@ use futures_core::Stream; use futures_util::{StreamExt, TryStreamExt}; use http::header::{CONNECTION, CONTENT_TYPE, UPGRADE}; use http::request::Builder; -use hyper::{body::Bytes, Body, Method}; +use http_body_util::Full; +use hyper::{body::Bytes, Method}; use serde::Serialize; +use serde_derive::Deserialize; use tokio::io::AsyncWrite; use tokio_util::codec::FramedRead; @@ -17,7 +19,6 @@ use std::pin::Pin; use super::Docker; use crate::errors::Error; - use crate::models::*; use crate::read::NewlineLogOutputDecoder; @@ -1201,7 +1202,7 @@ impl Docker { url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -1303,7 +1304,7 @@ impl Docker { &url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -1348,7 +1349,7 @@ impl Docker { &url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -1397,7 +1398,7 @@ impl Docker { &url, Builder::new().method(Method::DELETE), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -1448,7 +1449,7 @@ impl Docker { &url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_stream(req).map(|res| match res { @@ -1522,7 +1523,7 @@ impl Docker { .header(CONNECTION, "Upgrade") .header(UPGRADE, "tcp"), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); let (read, write) = self.process_upgraded(req).await?; @@ -1571,7 +1572,7 @@ impl Docker { &url, Builder::new().method(Method::POST), Some(options), - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -1617,7 +1618,7 @@ impl Docker { &url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -1662,7 +1663,7 @@ impl Docker { &url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -1710,7 +1711,7 @@ impl Docker { &url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -1763,7 +1764,7 @@ impl Docker { &url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_stream_string(req) @@ -1802,7 +1803,7 @@ impl Docker { &url, Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -1850,7 +1851,7 @@ impl Docker { &url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_stream(req) @@ -1899,7 +1900,7 @@ impl Docker { &url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -2000,7 +2001,7 @@ impl Docker { &url, Builder::new().method(Method::POST), Some(options), - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); 
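
(Illustrative sketch of the idiom behind the repeated hunks in this file:
hyper 1.x removes `Body::empty()`, and a client body type must be chosen
explicitly. `http_body_util::Full` is a single-frame, fixed-size body, so an
empty request body becomes `Full::new(Bytes::new())`:)

    use http_body_util::Full;
    use hyper::body::Bytes;

    // `Full<Bytes>` sends its whole payload as one frame; an empty `Bytes`
    // therefore stands in for the old `Body::empty()`.
    fn empty_body() -> Full<Bytes> {
        Full::new(Bytes::new())
    }
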
self.process_into_unit(req).await @@ -2035,7 +2036,7 @@ impl Docker { &url, Builder::new().method(Method::POST), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -2070,7 +2071,7 @@ impl Docker { &url, Builder::new().method(Method::POST), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -2121,7 +2122,7 @@ impl Docker { url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -2167,7 +2168,7 @@ impl Docker { &self, container_name: &str, options: Option>, - tar: Body, + tar: Bytes, ) -> Result<(), Error> where T: Into + Serialize, @@ -2180,7 +2181,7 @@ impl Docker { .method(Method::PUT) .header(CONTENT_TYPE, "application/x-tar"), options, - Ok(tar), + Ok(Full::new(tar)), ); self.process_into_unit(req).await @@ -2228,7 +2229,7 @@ impl Docker { &url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_body(req) diff --git a/src/docker.rs b/src/docker.rs index 90f437d2..de6c7178 100644 --- a/src/docker.rs +++ b/src/docker.rs @@ -9,8 +9,6 @@ use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::Duration; -#[cfg(feature = "ct_logs")] -use std::time::SystemTime; use std::{cmp, env, fmt}; use futures_core::Stream; @@ -19,14 +17,21 @@ use futures_util::future::TryFutureExt; use futures_util::stream::TryStreamExt; use http::header::CONTENT_TYPE; use http::request::Builder; -use hyper::client::{Client, HttpConnector}; -use hyper::{self, body::Bytes, Body, Method, Request, Response, StatusCode}; +use http_body_util::{BodyExt, Full}; +use hyper::body::Incoming; +use hyper::{self, body::Bytes, Method, Request, Response, StatusCode}; #[cfg(feature = "ssl")] use hyper_rustls::HttpsConnector; +use hyper_util::client::legacy::connect::HttpConnector; +use hyper_util::{client::legacy::Client, rt::TokioExecutor}; #[cfg(unix)] use hyperlocal::UnixConnector; +use log::{debug, trace}; +#[cfg(feature = "ssl")] +use rustls::{crypto::ring::sign::any_supported_type, sign::CertifiedKey, ALL_VERSIONS}; #[cfg(feature = "ssl")] -use rustls::sign::{CertifiedKey, RsaSigningKey}; +use rustls_pki_types::{CertificateDer, PrivateKeyDer}; +use serde_derive::{Deserialize, Serialize}; use tokio::io::{split, AsyncRead, AsyncWrite}; use tokio_util::codec::FramedRead; @@ -35,7 +40,9 @@ use crate::errors::Error; use crate::errors::Error::*; #[cfg(windows)] use crate::named_pipe::NamedPipeConnector; -use crate::read::{JsonLineDecoder, NewlineLogOutputDecoder, StreamReader}; +use crate::read::{ + AsyncUpgraded, IncomingStream, JsonLineDecoder, NewlineLogOutputDecoder, StreamReader, +}; use crate::uri::Uri; use serde::de::DeserializeOwned; @@ -83,23 +90,23 @@ pub(crate) enum ClientType { /// with various Connect traits fulfilled. 
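
(Illustrative sketch: hyper 1.x no longer ships a pooling client, so each
variant below wraps the "legacy" client from hyper-util, which is built from
an executor and is generic over the request body type:)

    use http_body_util::Full;
    use hyper::body::Bytes;
    use hyper_util::client::legacy::{connect::HttpConnector, Client};
    use hyper_util::rt::TokioExecutor;

    // The body type is now part of the client's own type, hence the
    // `Client<_, Full<Bytes>>` signatures in the enum below.
    fn http_client() -> Client<HttpConnector, Full<Bytes>> {
        Client::builder(TokioExecutor::new()).build(HttpConnector::new())
    }
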
 pub(crate) enum Transport {
     Http {
-        client: Client<HttpConnector>,
+        client: Client<HttpConnector, Full<Bytes>>,
     },
     #[cfg(feature = "ssl")]
     Https {
-        client: Client<HttpsConnector<HttpConnector>>,
+        client: Client<HttpsConnector<HttpConnector>, Full<Bytes>>,
     },
     #[cfg(unix)]
     Unix {
-        client: Client<UnixConnector>,
+        client: Client<UnixConnector, Full<Bytes>>,
     },
     #[cfg(windows)]
     NamedPipe {
-        client: Client<NamedPipeConnector>,
+        client: Client<NamedPipeConnector, Full<Bytes>>,
     },
     #[cfg(test)]
     Mock {
-        client: Client<yup_hyper_mock::HostToReplyConnector>,
+        client: Client<yup_hyper_mock::HostToReplyConnector, Full<Bytes>>,
     },
 }
@@ -289,6 +296,7 @@ struct DockerServerErrorMessage {
 }
 
 #[cfg(feature = "ssl")]
+#[derive(Debug)]
 struct DockerClientCertResolver {
     ssl_key: PathBuf,
     ssl_cert: PathBuf,
@@ -312,28 +320,18 @@ impl DockerClientCertResolver {
         Ok(io::BufReader::new(fs::File::open(path)?))
     }
 
-    fn certs(path: &Path) -> Result<Vec<rustls::Certificate>, Error> {
+    fn certs(path: &Path) -> Result<Vec<CertificateDer<'static>>, Error> {
         Ok(rustls_pemfile::certs(&mut Self::open_buffered(path)?)
-            .map_err(|_| CertPathError {
-                path: path.to_path_buf(),
-            })?
-            .iter()
-            .map(|v| rustls::Certificate(v.clone()))
-            .collect())
+            .collect::<Result<Vec<CertificateDer<'static>>, std::io::Error>>()?)
     }
 
-    fn keys(path: &Path) -> Result<Vec<rustls::PrivateKey>, Error> {
+    fn keys(path: &Path) -> Result<Vec<PrivateKeyDer<'static>>, Error> {
         let mut rdr = Self::open_buffered(path)?;
         let mut keys = vec![];
-        loop {
-            match rustls_pemfile::read_one(&mut rdr).map_err(|_| CertPathError {
-                path: path.to_path_buf(),
-            })? {
-                Some(rustls_pemfile::Item::RSAKey(key)) => keys.push(rustls::PrivateKey(key)),
-                Some(rustls_pemfile::Item::PKCS8Key(key)) => keys.push(rustls::PrivateKey(key)),
-                None => break,
-                _ => {}
-            }
+        if let Some(key) = rustls_pemfile::private_key(&mut rdr).map_err(|_| CertPathError {
+            path: path.to_path_buf(),
+        })? {
+            keys.push(key);
         }
 
         Ok(keys)
@@ -352,14 +350,11 @@ impl DockerClientCertResolver {
             });
         };
 
-        let signing_key = RsaSigningKey::new(&key).map_err(|_| CertParseError {
+        let signing_key = any_supported_type(&key).map_err(|_| CertParseError {
             path: self.ssl_key.to_owned(),
         })?;
 
-        Ok(Arc::new(CertifiedKey::new(
-            all_certs,
-            Arc::new(signing_key),
-        )))
+        Ok(Arc::new(CertifiedKey::new(all_certs, signing_key)))
     }
 }
@@ -458,46 +453,25 @@ impl Docker {
         let client_addr = addr.replacen("tcp://", "", 1).replacen("https://", "", 1);
 
         let mut root_store = rustls::RootCertStore::empty();
+
+        #[cfg(not(feature = "webpki"))]
         for cert in rustls_native_certs::load_native_certs()? {
             root_store
-                .add(&rustls::Certificate(cert.0))
-                .map_err(|err| NoNativeCertsError { err })?;
+                .add(cert)
+                .map_err(|err| NoNativeCertsError { err })?
        }
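
(Illustrative sketch, ours: rustls-pemfile 2.x yields an iterator of typed
`rustls-pki-types` items instead of `Vec<Vec<u8>>`, which is why `certs` and
`keys` above collect per-item results:)

    use rustls_pki_types::CertificateDer;

    // Each PEM block decodes to a `CertificateDer<'static>`, and I/O errors
    // surface per item through the iterator.
    fn load_certs(mut rdr: impl std::io::BufRead) -> std::io::Result<Vec<CertificateDer<'static>>> {
        rustls_pemfile::certs(&mut rdr).collect()
    }
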
-
-        root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
-            rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
-                ta.subject,
-                ta.spki,
-                ta.name_constraints,
-            )
-        }));
+        #[cfg(feature = "webpki")]
+        root_store.extend(webpki_roots::TLS_SERVER_ROOTS.iter().cloned());
 
         let mut ca_pem = io::Cursor::new(fs::read(ssl_ca).map_err(|_| CertPathError {
             path: ssl_ca.to_owned(),
         })?);
 
-        root_store.add_parsable_certificates(&rustls_pemfile::certs(&mut ca_pem).map_err(
-            |_| CertParseError {
-                path: ssl_ca.to_owned(),
-            },
-        )?);
-
-        #[cfg(feature = "ct_logs")]
-        let config = {
-            let ct_logs_expiry =
-                SystemTime::UNIX_EPOCH + Duration::from_secs(TIMESTAMP_CT_LOGS_EXPIRY);
-            rustls::ClientConfig::builder()
-                .with_safe_defaults()
-                .with_root_certificates(root_store)
-                .with_certificate_transparency_logs(&ct_logs::LOGS, ct_logs_expiry)
-                .with_client_cert_resolver(Arc::new(DockerClientCertResolver {
-                    ssl_key: ssl_key.to_owned(),
-                    ssl_cert: ssl_cert.to_owned(),
-                }))
-        };
-        #[cfg(not(feature = "ct_logs"))]
-        let config = rustls::ClientConfig::builder()
-            .with_safe_defaults()
+        root_store.add_parsable_certificates(
+            rustls_pemfile::certs(&mut ca_pem).collect::<Result<Vec<_>, _>>()?,
+        );
+
+        let config = rustls::ClientConfig::builder_with_protocol_versions(ALL_VERSIONS)
             .with_root_certificates(root_store)
             .with_client_cert_resolver(Arc::new(DockerClientCertResolver {
                 ssl_key: ssl_key.to_owned(),
@@ -510,7 +484,7 @@ impl Docker {
         let https_connector: HttpsConnector<HttpConnector> =
             HttpsConnector::from((http_connector, config));
 
-        let client_builder = Client::builder();
+        let client_builder = Client::builder(TokioExecutor::new());
         let client = client_builder.build(https_connector);
         let transport = Transport::Https { client };
         let docker = Docker {
@@ -586,7 +560,7 @@ impl Docker {
 
         let http_connector = HttpConnector::new();
 
-        let client_builder = Client::builder();
+        let client_builder = Client::builder(TokioExecutor::new());
         let client = client_builder.build(http_connector);
         let transport = Transport::Http { client };
         let docker = Docker {
@@ -725,8 +699,7 @@ impl Docker {
 
         let unix_connector = UnixConnector;
 
-        let mut client_builder = Client::builder();
-        client_builder.pool_max_idle_per_host(0);
+        let client_builder = Client::builder(TokioExecutor::new());
         let client = client_builder.build(unix_connector);
         let transport = Transport::Unix { client };
 
@@ -801,8 +774,7 @@ impl Docker {
 
         let named_pipe_connector = NamedPipeConnector;
 
-        let mut client_builder = Client::builder();
-        client_builder.pool_max_idle_per_host(0);
+        let mut client_builder = Client::builder(TokioExecutor::new());
         client_builder.http1_title_case_headers(true);
         let client = client_builder.build(named_pipe_connector);
         let transport = Transport::NamedPipe { client };
@@ -895,7 +867,7 @@ impl Docker {
         timeout: u64,
         client_version: &ClientVersion,
     ) -> Result<Docker, Error> {
-        let client_builder = Client::builder();
+        let client_builder = Client::builder(TokioExecutor::new());
         let client = client_builder.build(connector);
 
         let (transport, client_type) = (Transport::Mock { client }, ClientType::Http);
@@ -947,21 +919,18 @@ impl Docker {
 impl Docker {
     pub(crate) fn process_into_value<T>(
         &self,
-        req: Result<Request<Body>, Error>,
+        req: Result<Request<Full<Bytes>>, Error>,
     ) -> impl Future<Output = Result<T, Error>>
     where
         T: DeserializeOwned,
     {
         let fut = self.process_request(req);
-        async move {
-            let response = fut.await?;
-            Docker::decode_response(response).await
-        }
+        async move { Docker::decode_response(fut.await?).await }
     }
 
     pub(crate) fn process_into_stream<T>(
         &self,
-        req: Result<Request<Body>, Error>,
+        req: Result<Request<Full<Bytes>>, Error>,
     ) -> impl Stream<Item = Result<T, Error>> + Unpin
     where
         T: DeserializeOwned,
     {
@@ -976,7 +945,7 @@ impl Docker {
     pub(crate) fn process_into_stream_string(
         &self,
-        req: Result<Request<Body>, Error>,
+        req: Result<Request<Full<Bytes>>, Error>,
     ) -> impl Stream<Item = Result<LogOutput, Error>> + Unpin {
         Box::pin(
             self.process_request(req)
@@ -987,7 +956,7 @@ impl Docker {
     pub(crate) fn process_into_unit(
         &self,
-        req: Result<Request<Body>, Error>,
+        req: Result<Request<Full<Bytes>>, Error>,
     ) -> impl Future<Output = Result<(), Error>> {
         let fut = self.process_request(req);
         async move {
@@ -998,11 +967,11 @@ impl Docker {
     pub(crate) fn process_into_body(
         &self,
-        req: Result<Request<Body>, Error>,
+        req: Result<Request<Full<Bytes>>, Error>,
     ) -> impl Stream<Item = Result<Bytes, Error>> + Unpin {
         Box::pin(
             self.process_request(req)
-                .map_ok(|response| response.into_body().map_err(Error::from))
+                .map_ok(|response| IncomingStream::new(response.into_body()))
                 .into_stream()
                 .try_flatten(),
         )
@@ -1010,7 +979,7 @@ impl Docker {
     pub(crate) fn process_into_string(
         &self,
-        req: Result<Request<Body>, Error>,
+        req: Result<Request<Full<Bytes>>, Error>,
     ) -> impl Future<Output = Result<String, Error>> {
         let fut = self.process_request(req);
         async move {
@@ -1021,14 +990,16 @@ impl Docker {
     pub(crate) async fn process_upgraded(
         &self,
-        req: Result<Request<Body>, Error>,
+        req: Result<Request<Full<Bytes>>, Error>,
     ) -> Result<(impl AsyncRead, impl AsyncWrite), Error> {
         let res = self.process_request(req).await?;
         let upgraded = hyper::upgrade::on(res).await?;
-        Ok(split(upgraded))
+        let tokio_upgraded = AsyncUpgraded::new(upgraded);
+
+        Ok(split(tokio_upgraded))
     }
 
-    pub(crate) fn serialize_payload<S>(body: Option<S>) -> Result<Body, Error>
+    pub(crate) fn serialize_payload<S>(body: Option<S>) -> Result<Full<Bytes>, Error>
     where
         S: Serialize,
     {
@@ -1040,8 +1011,8 @@ impl Docker {
             .map(|payload| {
                 debug!("{}", payload.clone().unwrap_or_default());
                 payload
-                    .map(|content| content.into())
-                    .unwrap_or_else(Body::empty)
+                    .map(|content| Full::new(content.into()))
+                    .unwrap_or(Full::new(Bytes::new()))
             })
     }
 
@@ -1068,7 +1039,7 @@ impl Docker {
             "/version",
             Builder::new().method(Method::GET),
             None::<String>,
-            Ok(Body::empty()),
+            Ok(Full::new(Bytes::new())),
         );
 
         let res = self
@@ -1100,8 +1071,8 @@ impl Docker {
     pub(crate) fn process_request(
         &self,
-        request: Result<Request<Body>, Error>,
-    ) -> impl Future<Output = Result<Response<Body>, Error>> {
+        request: Result<Request<Full<Bytes>>, Error>,
+    ) -> impl Future<Output = Result<Response<Incoming>, Error>> {
         let transport = self.transport.clone();
         let timeout = self.client_timeout;
 
@@ -1142,8 +1113,8 @@ impl Docker {
         path: &str,
         builder: Builder,
         query: Option<O>,
-        payload: Result<Body, Error>,
-    ) -> Result<Request<Body>, Error>
+        payload: Result<Full<Bytes>, Error>,
+    ) -> Result<Request<Full<Bytes>>, Error>
     where
         O: Serialize,
     {
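
(Illustrative sketch: `hyper::body::to_bytes` is gone in hyper 1.x; the
decode helpers below aggregate the streaming `Incoming` body through
`http_body_util::BodyExt` instead:)

    use http_body_util::BodyExt;
    use hyper::body::Incoming;

    // Collect every data frame of the response body into a single `Bytes`.
    async fn body_bytes(res: hyper::Response<Incoming>) -> Result<bytes::Bytes, hyper::Error> {
        Ok(res.into_body().collect().await?.to_bytes())
    }
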
@@ -1164,9 +1135,9 @@ impl Docker {
 
     async fn execute_request(
         transport: Arc<Transport>,
-        req: Request<Body>,
+        req: Request<Full<Bytes>>,
         timeout: u64,
-    ) -> Result<Response<Body>, Error> {
+    ) -> Result<Response<Incoming>, Error> {
         // This is where we determine to which transport we issue the request.
         let request = match *transport {
             Transport::Http { ref client } => client.request(req),
@@ -1186,37 +1157,34 @@ impl Docker {
         }
     }
 
-    fn decode_into_stream<T>(res: Response<Body>) -> impl Stream<Item = Result<T, Error>>
+    fn decode_into_stream<T>(res: Response<Incoming>) -> impl Stream<Item = Result<T, Error>>
     where
         T: DeserializeOwned,
     {
-        FramedRead::new(
-            StreamReader::new(res.into_body().map_err(Error::from)),
-            JsonLineDecoder::new(),
-        )
+        FramedRead::new(StreamReader::new(res.into_body()), JsonLineDecoder::new())
     }
 
     fn decode_into_stream_string(
-        res: Response<Body>,
+        res: Response<Incoming>,
     ) -> impl Stream<Item = Result<LogOutput, Error>> {
         FramedRead::new(
-            StreamReader::new(res.into_body().map_err(Error::from)),
+            StreamReader::new(res.into_body()),
             NewlineLogOutputDecoder::new(false),
         )
         .map_err(Error::from)
     }
 
-    async fn decode_into_string(response: Response<Body>) -> Result<String, Error> {
-        let body = hyper::body::to_bytes(response.into_body()).await?;
+    async fn decode_into_string(response: Response<Incoming>) -> Result<String, Error> {
+        let body = response.into_body().collect().await?.to_bytes();
         Ok(String::from_utf8_lossy(&body).to_string())
     }
 
-    async fn decode_response<T>(response: Response<Body>) -> Result<T, Error>
+    async fn decode_response<T>(response: Response<Incoming>) -> Result<T, Error>
     where
         T: DeserializeOwned,
     {
-        let bytes = hyper::body::to_bytes(response.into_body()).await?;
+        let bytes = response.into_body().collect().await?.to_bytes();
 
         debug!("Decoded into string: {}", &String::from_utf8_lossy(&bytes));
diff --git a/src/errors.rs b/src/errors.rs
index 1e925ae3..13cf41f7 100644
--- a/src/errors.rs
+++ b/src/errors.rs
@@ -39,6 +39,7 @@ pub enum Error {
     #[error("Could not load native certs")]
     NoNativeCertsError {
         /// The original error emitted.
+        #[from]
         err: rustls::Error,
     },
     /// Generic error emitted by the docker server.
@@ -149,4 +150,11 @@ pub enum Error {
         #[from]
         err: http::uri::InvalidUri,
     },
+    /// Error emitted by the hyper legacy client.
+    #[error("Error in the hyper legacy client: {}", err)]
+    HyperLegacyError {
+        /// The original error emitted.
+        #[from]
+        err: hyper_util::client::legacy::Error,
+    },
 }
diff --git a/src/exec.rs b/src/exec.rs
index 834f7605..e3484754 100644
--- a/src/exec.rs
+++ b/src/exec.rs
@@ -1,11 +1,12 @@
 //! Exec API: Run new commands inside running containers
+use bytes::Bytes;
 use futures_util::TryStreamExt;
 use http::header::{CONNECTION, UPGRADE};
 use http::request::Builder;
-use hyper::Body;
+use http_body_util::Full;
 use hyper::Method;
-use serde::ser::Serialize;
+use serde_derive::{Deserialize, Serialize};
 
 use super::Docker;
 
@@ -24,7 +25,7 @@ use tokio_util::codec::FramedRead;
 #[serde(rename_all = "PascalCase")]
 pub struct CreateExecOptions<T>
 where
-    T: Into<String> + Serialize,
+    T: Into<String> + serde::ser::Serialize,
 {
     /// Attach to `stdin` of the exec command.
pub attach_stdin: Option, @@ -142,7 +143,7 @@ impl Docker { config: CreateExecOptions, ) -> Result where - T: Into + Serialize, + T: Into + serde::ser::Serialize, { let url = format!("/containers/{container_name}/exec"); @@ -289,7 +290,7 @@ impl Docker { &url, Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -340,7 +341,7 @@ impl Docker { &url, Builder::new().method(Method::POST), Some(options), - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await diff --git a/src/grpc/driver/docker_container.rs b/src/grpc/driver/docker_container.rs index bac6fc5f..b7443d5f 100644 --- a/src/grpc/driver/docker_container.rs +++ b/src/grpc/driver/docker_container.rs @@ -20,6 +20,7 @@ use http::{ request::Builder, Method, }; +use log::{debug, error, info, trace}; use tonic::{codegen::InterceptedService, transport::Channel}; use tonic::{service::Interceptor, transport::Endpoint}; use tower_service::Service; @@ -44,7 +45,7 @@ pub const DEFAULT_IMAGE: &str = "moby/buildkit:master"; const DEFAULT_STATE_DIR: &str = "/var/lib/buildkit"; const DUPLEX_BUF_SIZE: usize = 8 * 1024; -impl Service for DockerContainer { +impl Service for DockerContainer { type Response = GrpcFramedTransport; type Error = GrpcError; type Future = Pin> + Send>>; @@ -53,7 +54,7 @@ impl Service for DockerContainer { Poll::Ready(Ok(())) } - fn call(&mut self, _req: http::Uri) -> Self::Future { + fn call(&mut self, _req: tonic::transport::Uri) -> Self::Future { let client = Docker::clone(&self.docker); let name = String::clone(&self.name); @@ -88,14 +89,11 @@ impl Service for DockerContainer { })), ); - client - .process_upgraded(req) - .await - .and_then(|(read, write)| { - let output = Box::pin(read); - let input = Box::pin(write); - Ok(GrpcFramedTransport::new(output, input, capacity)) - }) + client.process_upgraded(req).await.map(|(read, write)| { + let output = Box::pin(read); + let input = Box::pin(write); + GrpcFramedTransport::new(output, input, capacity) + }) }; Box::pin(fut.map_err(From::from)) @@ -159,21 +157,19 @@ impl DockerContainerBuilder { self.network("host"); } - let container_name = &self.inner.name; - match self + if let Err(crate::errors::Error::DockerResponseServerError { + status_code: 404, + message: _, + }) = self .inner .docker .inspect_container(&self.inner.name, None) .await { - Err(crate::errors::Error::DockerResponseServerError { - status_code: 404, - message: _, - }) => self.inner.create().await?, - _ => (), + self.inner.create().await? 
}; - debug!("starting container {}", &container_name); + debug!("starting container {}", &self.inner.name); self.inner.start().await?; self.inner.wait().await?; @@ -301,7 +297,7 @@ impl<'a> DockerContainer { router = service.append(router); } trace!("router: {:#?}", router); - match router + if let Err(e) = router .serve_with_incoming(futures_util::stream::iter(vec![Ok::< _, tonic::transport::Error, @@ -310,8 +306,7 @@ impl<'a> DockerContainer { )])) .await { - Err(e) => error!("Failed to serve grpc connection: {}", e), - _ => (), + error!("Failed to serve grpc connection: {}", e) } }); @@ -352,7 +347,7 @@ impl<'a> DockerContainer { // place all buildkit containers into this cgroup { Some(if let Some(cgroup_parent) = &self.cgroup_parent { - String::clone(&cgroup_parent) + String::clone(cgroup_parent) } else { String::from("/docker/buildx") }) @@ -360,11 +355,7 @@ impl<'a> DockerContainer { _ => None, }; - let network_mode = if let Some(net_mode) = &self.net_mode { - Some(String::clone(&net_mode)) - } else { - None - }; + let network_mode = self.net_mode.as_ref().map(String::clone); let userns_mode = if let Some(security_options) = &info.security_options { if security_options.iter().any(|f| f == "userns") { @@ -465,7 +456,7 @@ impl<'a> DockerContainer { } _ => { tokio::time::sleep(Duration::from_millis(attempts * 120)).await; - attempts = attempts + 1; + attempts += 1; } } } diff --git a/src/grpc/driver/moby.rs b/src/grpc/driver/moby.rs index c5dfd6fc..fa3fbd3f 100644 --- a/src/grpc/driver/moby.rs +++ b/src/grpc/driver/moby.rs @@ -1,8 +1,11 @@ #![cfg(feature = "buildkit")] use bollard_buildkit_proto::{health, moby::buildkit::v1::control_client::ControlClient}; +use bytes::Bytes; use http::{request::Builder, Method}; -use hyper::Body; +use http_body_util::Full; +use log::error; +use log::trace; use tonic::transport::{Channel, Endpoint}; use crate::{ @@ -41,7 +44,7 @@ impl Moby { let metadata_grpc_method: Vec = services.iter().flat_map(|s| s.names()).collect(); let req = self.docker.build_request( - &url, + url, Builder::new() .method(Method::POST) .header("Connection", "Upgrade") @@ -52,7 +55,7 @@ impl Moby { metadata_grpc_method.join(","), ), opt, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); let (read, write) = self.docker.process_upgraded(req).await?; @@ -72,7 +75,7 @@ impl Moby { router = service.append(router); } trace!("router: {:#?}", router); - match router + if let Err(e) = router .serve_with_incoming(futures_util::stream::iter(vec![Ok::< _, tonic::transport::Error, @@ -81,8 +84,7 @@ impl Moby { )])) .await { - Err(e) => error!("Failed to serve grpc connection: {}", e), - _ => (), + error!("Failed to serve grpc connection: {}", e) } }); diff --git a/src/grpc/error.rs b/src/grpc/error.rs index 87720f59..6b56e83c 100644 --- a/src/grpc/error.rs +++ b/src/grpc/error.rs @@ -78,19 +78,12 @@ pub enum GrpcAuthError { #[from] err: url::ParseError, }, - /// Error while building http headers when calling the registry - #[error("Invalid HTTP request failure during GRPC authentication with registry")] - HTTPError { - /// The original http error - #[from] - err: http::Error, - }, /// Error emitted by the hyper library during authentication with the registry #[error("Hyper error during GRPC authentication with registry")] HyperError { /// The source hyper error #[from] - err: hyper::Error, + err: hyper::http::Error, }, /// Error while deserializing the payload emitted by the registry #[error("Serde payload deserializing error during GRPC authentication")] @@ -104,6 +97,13 @@ pub enum 
GrpcAuthError { InvalidUriError { /// The invalid uri error #[from] - err: http::uri::InvalidUri, + err: hyper::http::uri::InvalidUri, + }, + /// Error that is emitted by the hyper-util legacy bridge client + #[error("Error in the hyper legacy client: {}", err)] + HyperLegacyError { + /// The original error emitted. + #[from] + err: hyper_util::client::legacy::Error, }, } diff --git a/src/grpc/export.rs b/src/grpc/export.rs index 47092612..926e4021 100644 --- a/src/grpc/export.rs +++ b/src/grpc/export.rs @@ -5,6 +5,8 @@ pub use bollard_buildkit_proto::health; pub use bollard_buildkit_proto::moby; use bytes::Bytes; +use log::debug; +use log::trace; use std::collections::HashMap; use std::net::IpAddr; @@ -61,13 +63,14 @@ impl ToString for ImageBuildHostIp { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Default, Debug, Clone, PartialEq)] #[non_exhaustive] /// Network mode to use for this container. Supported standard values are: `bridge`, `host`, /// `none`, and `container:`. Any other value is taken as a custom network's name to which /// this container should connect to. pub enum ImageBuildNetworkMode { /// Bridge mode networking + #[default] Bridge, /// Host mode networking Host, @@ -77,12 +80,6 @@ pub enum ImageBuildNetworkMode { Container(String), } -impl Default for ImageBuildNetworkMode { - fn default() -> Self { - ImageBuildNetworkMode::Bridge - } -} - impl ToString for ImageBuildNetworkMode { fn to_string(&self) -> String { match self { @@ -124,7 +121,7 @@ impl ImageBuildFrontendOptions { ImageBuildFrontendOptionsBuilder::new() } - pub(crate) fn to_map(self) -> HashMap { + pub(crate) fn into_map(self) -> HashMap { let mut attrs = HashMap::new(); if self.image_resolve_mode { @@ -319,13 +316,14 @@ pub struct ImageExporterOCIOutput { pub(crate) annotation: HashMap, } -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Default, Debug, Copy, Clone, PartialEq)] #[non_exhaustive] /// Compression type for the exported image tar file pub enum ImageExporterOCIOutputCompression { /// Emit the tar file uncompressed Uncompressed, /// Emit the tar file GZIP compressed + #[default] Gzip, /// Emit the tar file as a stargz snapshot Estargz, @@ -333,12 +331,6 @@ pub enum ImageExporterOCIOutputCompression { Zstd, } -impl Default for ImageExporterOCIOutputCompression { - fn default() -> Self { - ImageExporterOCIOutputCompression::Gzip - } -} - impl ToString for ImageExporterOCIOutputCompression { fn to_string(&self) -> String { match self { @@ -389,7 +381,7 @@ impl ImageExporterOCIOutput { } } - pub(crate) fn to_map(self) -> HashMap { + pub(crate) fn into_map(self) -> HashMap { let mut attrs = HashMap::new(); attrs.insert(String::from("name"), self.name); @@ -636,16 +628,14 @@ impl<'a> super::super::Docker { ) -> Result<(), GrpcError> { let buildkit_name = String::from(driver.name()); - let payload = match load_input { - ImageExporterLoadInput::Upload(bytes) => bytes, - }; + let ImageExporterLoadInput::Upload(bytes) = load_input; let mut upload_provider = super::UploadProvider::new(); - let context = upload_provider.add(payload.to_vec()); + let context = upload_provider.add(bytes.to_vec()); - let mut frontend_attrs = frontend_opts.to_map(); + let mut frontend_attrs = frontend_opts.into_map(); frontend_attrs.insert(String::from("context"), context); - let exporter_attrs = exporter_request.output.to_map(); + let exporter_attrs = exporter_request.output.into_map(); let mut auth_provider = super::AuthProvider::new(); if let Some(creds) = credentials { @@ -672,7 +662,7 @@ impl<'a> super::super::Docker { 
let id = super::new_id(); let solve_request = moby::buildkit::v1::SolveRequest { - r#ref: String::from(id), + r#ref: id, cache: None, definition: None, entitlements: vec![], diff --git a/src/grpc/io/reader_stream.rs b/src/grpc/io/reader_stream.rs index f164b8a6..88b0111d 100644 --- a/src/grpc/io/reader_stream.rs +++ b/src/grpc/io/reader_stream.rs @@ -2,6 +2,7 @@ use bollard_buildkit_proto::moby::buildkit::v1::BytesMessage; use futures_core::stream::Stream; +use log::error; use pin_project_lite::pin_project; use std::pin::Pin; use std::task::{Context, Poll}; diff --git a/src/grpc/mod.rs b/src/grpc/mod.rs index 551acd9d..a9644888 100644 --- a/src/grpc/mod.rs +++ b/src/grpc/mod.rs @@ -32,9 +32,16 @@ use std::task::{Context, Poll}; use bollard_buildkit_proto::moby::filesync::v1::auth_server::AuthServer; use bollard_buildkit_proto::moby::filesync::v1::file_send_server::FileSendServer; +use bytes::Bytes; use futures_core::Stream; -use hyper::client::HttpConnector; +use http_body_util::{BodyExt, Full}; +use hyper_util::client::legacy::connect::HttpConnector; +use hyper_util::client::legacy::Client; +use hyper_util::rt::TokioExecutor; +use log::trace; use rand::RngCore; +use rustls::ALL_VERSIONS; +use serde_derive::Deserialize; use tonic::transport::NamedService; use tonic::{Code, Request, Response, Status, Streaming}; @@ -42,9 +49,9 @@ use futures_util::{StreamExt, TryFutureExt}; use tokio::io::AsyncWriteExt; use http::request::Builder; -use hyper::{Body, Client, Method}; +use hyper::Method; use std::future::Future; -use tower::Service; +use tower_service::Service; use self::error::GrpcAuthError; use self::io::GrpcTransport; @@ -130,6 +137,8 @@ impl Health for HealthServerImpl { Err(Status::new(Code::NotFound, "unknown service")) } } + + #[allow(clippy::diverging_sub_expression)] async fn watch( &self, _request: Request, @@ -171,7 +180,7 @@ impl FileSend for FileSendImpl { Ok(v) => { file.write_all(&v.data).await?; } - Err(err) => return Err(err.into()), + Err(err) => return Err(err), } } @@ -333,24 +342,34 @@ impl AuthProvider { } } - fn ssl_client() -> Result>, GrpcAuthError> { + fn ssl_client( + ) -> Result, Full>, GrpcAuthError> + { let mut root_store = rustls::RootCertStore::empty(); + + #[cfg(not(feature = "webpki"))] for cert in rustls_native_certs::load_native_certs()? { - root_store.add(&rustls::Certificate(cert.0))?; + root_store + .add(cert) + .map_err(|err| GrpcAuthError::RustTlsError { err })? 
} + #[cfg(feature = "webpki")] + root_store.extend(webpki_roots::TLS_SERVER_ROOTS.iter().cloned()); - let config = rustls::ClientConfig::builder() - .with_safe_defaults() + let config = rustls::ClientConfig::builder_with_protocol_versions(ALL_VERSIONS) .with_root_certificates(root_store) .with_no_client_auth(); let mut http_connector = HttpConnector::new(); http_connector.enforce_http(false); - let https_connector: hyper_rustls::HttpsConnector = - hyper_rustls::HttpsConnector::from((http_connector, config)); + let https_connector = hyper_rustls::HttpsConnectorBuilder::new() + .with_tls_config(config) + .https_or_http() + .enable_http1() + .build(); - let client_builder = Client::builder(); + let client_builder = Client::builder(TokioExecutor::new()); let client = client_builder.build(https_connector); Ok(client) @@ -385,19 +404,19 @@ impl AuthProvider { let full_uri = format!("{}?{}", opts.realm, ¶ms); let request_uri: hyper::Uri = full_uri.try_into()?; - let request = hyper::Request::post(request_uri).body(Body::empty())?; + let request = hyper::Request::post(request_uri).body(Full::new(Bytes::new()))?; let response = client.request(request).await?; let status = response.status().as_u16(); - if status < 200 || status >= 400 { + if !(200..400).contains(&status) { // return custom error return Err(GrpcAuthError::BadRegistryResponse { status_code: status, }); } - let bytes = hyper::body::to_bytes(response.into_body()).await?; + let bytes = response.into_body().collect().await.unwrap().to_bytes(); let oauth_token = serde_json::from_slice::(&bytes)?; @@ -458,12 +477,15 @@ impl Auth for AuthProvider { } } + #[allow(clippy::diverging_sub_expression)] async fn get_token_authority( &self, _request: Request, ) -> Result, Status> { unimplemented!() } + + #[allow(clippy::diverging_sub_expression)] async fn verify_token_authority( &self, _request: Request, @@ -477,7 +499,7 @@ pub(crate) struct GrpcClient { pub(crate) session_id: String, } -impl Service for GrpcClient { +impl Service for GrpcClient { type Response = GrpcTransport; type Error = error::GrpcError; type Future = Pin> + Send>>; @@ -486,33 +508,30 @@ impl Service for GrpcClient { Poll::Ready(Ok(())) } - fn call(&mut self, _req: http::Uri) -> Self::Future { + fn call(&mut self, _req: tonic::transport::Uri) -> Self::Future { // create the body let opt: Option = None; let url = "/grpc"; let client = self.client.clone(); let req = client.build_request( - &url, + url, Builder::new() .method(Method::POST) .header("Connection", "Upgrade") .header("Upgrade", "h2c") .header("X-Docker-Expose-Session-Uuid", &self.session_id), opt, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); let fut = async move { - client - .process_upgraded(req) - .await - .and_then(|(read, write)| { - let output = Box::pin(read); - let input = Box::pin(write); - Ok(GrpcTransport { - read: output, - write: input, - }) - }) + client.process_upgraded(req).await.map(|(read, write)| { + let output = Box::pin(read); + let input = Box::pin(write); + GrpcTransport { + read: output, + write: input, + } + }) }; // Return the response as an immediate future diff --git a/src/image.rs b/src/image.rs index 29d33e68..b0d5c31e 100644 --- a/src/image.rs +++ b/src/image.rs @@ -1,11 +1,13 @@ //! 
Image API: creating, manipulating and pushing docker images +use bytes::Bytes; use futures_core::Stream; #[cfg(feature = "buildkit")] use futures_util::future::{Either, FutureExt}; use futures_util::{stream, stream::StreamExt}; use http::header::CONTENT_TYPE; use http::request::Builder; -use hyper::{body::Bytes, Body, Method}; +use http_body_util::Full; +use hyper::Method; use serde::Serialize; use serde_repr::*; @@ -524,7 +526,7 @@ impl Docker { url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -573,7 +575,7 @@ impl Docker { pub fn create_image( &self, options: Option>, - root_fs: Option, + root_fs: Option, credentials: Option, ) -> impl Stream> where @@ -592,8 +594,8 @@ impl Docker { .header("X-Registry-Auth", base64_url_encode(&ser_cred)), options, match root_fs { - Some(body) => Ok(body), - None => Ok(Body::empty()), + Some(body) => Ok(Full::new(body)), + None => Ok(Full::new(Bytes::new())), }, ); self.process_into_stream(req).boxed() @@ -643,7 +645,7 @@ impl Docker { &url, Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -694,7 +696,7 @@ impl Docker { url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -730,7 +732,7 @@ impl Docker { &url, Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -785,7 +787,7 @@ impl Docker { url, Builder::new().method(Method::GET), Some(options), - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -841,7 +843,7 @@ impl Docker { .method(Method::DELETE) .header("X-Registry-Auth", base64_url_encode(&ser_cred)), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await } @@ -894,7 +896,7 @@ impl Docker { &url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -961,7 +963,7 @@ impl Docker { .header(CONTENT_TYPE, "application/json") .header("X-Registry-Auth", base64_url_encode(&ser_cred)), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_stream(req).boxed() @@ -1091,7 +1093,7 @@ impl Docker { &self, options: BuildImageOptions, credentials: Option>, - tar: Option, + tar: Option, ) -> impl Stream> + '_ where T: Into + Eq + Hash + Serialize, @@ -1124,7 +1126,7 @@ impl Docker { .method(Method::POST) .header(CONTENT_TYPE, "application/x-tar"), Some(options), - Ok(tar.unwrap_or_else(Body::empty)), + Ok(Full::new(tar.unwrap_or_default())), ); let session = stream::once( @@ -1133,9 +1135,7 @@ impl Docker { .fuse(), ); - let stream = self - .process_into_stream::(req) - .map(|data| Either::Left(data)); + let stream = self.process_into_stream::(req).map(Either::Left); futures_util::stream::select(stream, session) .filter_map(|either| async move { @@ -1165,7 +1165,7 @@ impl Docker { .header(CONTENT_TYPE, "application/x-tar") .header("X-Registry-Config", base64_url_encode(&ser_cred)), Some(options), - Ok(tar.unwrap_or_else(Body::empty)), + Ok(Full::new(tar.unwrap_or_default())), ); self.process_into_stream(req).boxed() @@ -1198,7 +1198,7 @@ impl Docker { let opt: Option = None; let req = self.build_request( - &url, + url, Builder::new() .method(Method::POST) .header("Connection", "Upgrade") @@ -1214,7 +1214,7 @@ impl Docker 
{ ) .header("X-Docker-Expose-Session-Uuid", &id), opt, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); let (read, write) = self.process_upgraded(req).await?; @@ -1273,7 +1273,7 @@ impl Docker { .method(Method::GET) .header(CONTENT_TYPE, "application/json"), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_body(req) } @@ -1300,7 +1300,7 @@ impl Docker { .method(Method::GET) .header(CONTENT_TYPE, "application/json"), Some(options), - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_body(req) } @@ -1331,7 +1331,7 @@ impl Docker { /// use bollard::errors::Error; /// /// use std::default::Default; - /// use futures_util::stream::StreamExt; + /// use futures_util::stream::{StreamExt, TryStreamExt}; /// use tokio::fs::File; /// use tokio::io::AsyncWriteExt; /// use tokio_util::codec; @@ -1343,19 +1343,19 @@ impl Docker { /// async move { /// let mut file = File::open("tarball.tar.gz").await.unwrap(); /// - /// let byte_stream = codec::FramedRead::new(file, codec::BytesCodec::new()).map(|r| { + /// let mut byte_stream = codec::FramedRead::new(file, codec::BytesCodec::new()).map(|r| { /// let bytes = r.unwrap().freeze(); /// Ok::<_, Error>(bytes) /// }); - - /// let body = hyper::Body::wrap_stream(byte_stream); - + /// + /// let bytes = byte_stream.next().await.unwrap().unwrap(); + /// /// let mut stream = docker /// .import_image( /// ImportImageOptions { /// ..Default::default() /// }, - /// body, + /// bytes, /// None, /// ); /// @@ -1367,7 +1367,7 @@ impl Docker { pub fn import_image( &self, options: ImportImageOptions, - root_fs: Body, + root_fs: Bytes, credentials: Option>, ) -> impl Stream> { match serde_json::to_string(&credentials.unwrap_or_default()) { @@ -1379,7 +1379,7 @@ impl Docker { .header(CONTENT_TYPE, "application/json") .header("X-Registry-Config", base64_url_encode(&ser_cred)), Some(options), - Ok(root_fs), + Ok(Full::new(root_fs)), ); self.process_into_stream(req).boxed() } diff --git a/src/lib.rs b/src/lib.rs index 14aa04dc..98bb5546 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -268,11 +268,6 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] #![warn(rust_2018_idioms)] -#[macro_use] -extern crate serde_derive; -#[macro_use] -extern crate log; - // declare modules pub mod auth; pub mod container; diff --git a/src/named_pipe.rs b/src/named_pipe.rs index 3f7ca2da..0ae58e2a 100644 --- a/src/named_pipe.rs +++ b/src/named_pipe.rs @@ -1,8 +1,10 @@ #![cfg(windows)] -use hyper::client::connect::Connected; +use hyper::rt::ReadBufCursor; +use hyper_util::client::legacy::connect::{Connected, Connection}; +use hyper_util::rt::TokioIo; use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::io::AsyncWrite; use tokio::net::windows::named_pipe::{ClientOptions, NamedPipeClient}; use tokio::time; @@ -47,17 +49,18 @@ impl NamedPipeStream { } } -impl AsyncRead for NamedPipeStream { +impl hyper::rt::Read for NamedPipeStream { fn poll_read( - mut self: Pin<&mut Self>, + self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, + buf: ReadBufCursor<'_>, ) -> Poll> { - Pin::new(&mut self.io).poll_read(cx, buf) + let mut t = TokioIo::new(self.project().io); + Pin::new(&mut t).poll_read(cx, buf) } } -impl AsyncWrite for NamedPipeStream { +impl hyper::rt::Write for NamedPipeStream { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -86,7 +89,7 @@ impl AsyncWrite for NamedPipeStream { #[derive(Clone, Copy, Debug)] pub struct NamedPipeConnector; -impl 
hyper::service::Service for NamedPipeConnector { +impl tower_service::Service for NamedPipeConnector { type Response = NamedPipeStream; type Error = io::Error; type Future = @@ -120,7 +123,7 @@ impl hyper::service::Service for NamedPipeConnector { } } -impl hyper::client::connect::Connection for NamedPipeStream { +impl Connection for NamedPipeStream { fn connected(&self) -> Connected { Connected::new() } diff --git a/src/network.rs b/src/network.rs index 2114fe2d..739b6a10 100644 --- a/src/network.rs +++ b/src/network.rs @@ -1,8 +1,10 @@ //! Network API: Networks are user-defined networks that containers can be attached to. +use bytes::Bytes; use http::request::Builder; -use hyper::{Body, Method}; -use serde::ser::Serialize; +use http_body_util::Full; +use hyper::Method; +use serde_derive::{Deserialize, Serialize}; use std::cmp::Eq; use std::collections::HashMap; @@ -71,7 +73,7 @@ where #[derive(Debug, Clone, Default, PartialEq, Serialize)] pub struct InspectNetworkOptions where - T: Into + Serialize, + T: Into + serde::ser::Serialize, { /// Detailed inspect output for troubleshooting. pub verbose: bool, @@ -107,7 +109,7 @@ where #[derive(Debug, Clone, Default, PartialEq, Serialize)] pub struct ListNetworksOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: /// - `driver=` Matches a network's driver. @@ -125,7 +127,7 @@ where #[serde(rename_all = "PascalCase")] pub struct ConnectNetworkOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// The ID or name of the container to connect to the network. pub container: T, @@ -138,7 +140,7 @@ where #[serde(rename_all = "PascalCase")] pub struct DisconnectNetworkOptions where - T: Into + Serialize, + T: Into + serde::ser::Serialize, { /// The ID or name of the container to disconnect from the network. pub container: T, @@ -174,7 +176,7 @@ where #[derive(Debug, Clone, Default, PartialEq, Serialize)] pub struct PruneNetworksOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// Filters to process on the prune list, encoded as JSON. /// - `until=` Prune networks created before this timestamp. 
The `` can be @@ -224,7 +226,7 @@ impl Docker { config: CreateNetworkOptions, ) -> Result where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/networks/create"; @@ -265,7 +267,7 @@ impl Docker { &url, Builder::new().method(Method::DELETE), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -307,7 +309,7 @@ impl Docker { options: Option>, ) -> Result where - T: Into + Serialize, + T: Into + serde::ser::Serialize, { let url = format!("/networks/{network_name}"); @@ -315,7 +317,7 @@ impl Docker { &url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -358,7 +360,7 @@ impl Docker { options: Option>, ) -> Result, Error> where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/networks"; @@ -366,7 +368,7 @@ impl Docker { url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -415,7 +417,7 @@ impl Docker { config: ConnectNetworkOptions, ) -> Result<(), Error> where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = format!("/networks/{network_name}/connect"); @@ -464,7 +466,7 @@ impl Docker { config: DisconnectNetworkOptions, ) -> Result<(), Error> where - T: Into + Serialize, + T: Into + serde::ser::Serialize, { let url = format!("/networks/{network_name}/disconnect"); @@ -516,7 +518,7 @@ impl Docker { options: Option>, ) -> Result where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/networks/prune"; @@ -524,7 +526,7 @@ impl Docker { url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await diff --git a/src/read.rs b/src/read.rs index 69e233d8..cf322b0a 100644 --- a/src/read.rs +++ b/src/read.rs @@ -1,13 +1,19 @@ use bytes::Buf; use bytes::BytesMut; use futures_core::Stream; +use hyper::body::Body; use hyper::body::Bytes; +use hyper::body::Incoming; +use hyper::upgrade::Upgraded; +use log::debug; +use log::trace; use pin_project_lite::pin_project; use serde::de::DeserializeOwned; use std::pin::Pin; -use std::string::String; use std::task::{Context, Poll}; use std::{cmp, io, marker::PhantomData}; + +use tokio::io::AsyncWrite; use tokio::io::{AsyncRead, ReadBuf}; use tokio_util::codec::Decoder; @@ -48,41 +54,31 @@ impl Decoder for NewlineLogOutputDecoder { // `start_exec` API on unix socket will emit values without a header if !src.is_empty() && src[0] > 2 { if self.is_tcp { - trace!("NewlineLogOutputDecoder: no header, but is_tcp is true returning raw data"); return Ok(Some(LogOutput::Console { message: src.split().freeze(), })); } let nl_index = src.iter().position(|b| *b == b'\n'); if let Some(pos) = nl_index { - trace!("NewlineLogOutputDecoder: newline found, pos = {}", pos + 1); return Ok(Some(LogOutput::Console { message: src.split_to(pos + 1).freeze(), })); } else { - trace!("NewlineLogOutputDecoder: no newline found"); return Ok(None); } } if src.len() < 8 { - trace!("NewlineLogOutputDecoder: not enough data for read header"); return Ok(None); } let header = src.split_to(8); let length = u32::from_be_bytes([header[4], header[5], header[6], header[7]]) as usize; - trace!( - "NewlineLogOutputDecoder: read header, type = {}, length = {}", - header[0], - length - ); self.state = 
NewlineLogOutputDecoderState::WaitingPayload(header[0], length); } NewlineLogOutputDecoderState::WaitingPayload(typ, length) => { if src.len() < length { - trace!("NewlineLogOutputDecoder: not enough data to read"); return Ok(None); } else { trace!("NewlineLogOutputDecoder: Reading payload"); @@ -190,19 +186,16 @@ enum ReadState { pin_project! { #[derive(Debug)] - pub(crate) struct StreamReader { + pub(crate) struct StreamReader { #[pin] - stream: S, + stream: Incoming, state: ReadState, } } -impl StreamReader -where - S: Stream>, -{ +impl StreamReader { #[inline] - pub(crate) fn new(stream: S) -> StreamReader { + pub(crate) fn new(stream: Incoming) -> StreamReader { StreamReader { stream, state: ReadState::NotReady, @@ -210,18 +203,14 @@ where } } -impl AsyncRead for StreamReader -where - S: Stream>, -{ +impl AsyncRead for StreamReader { fn poll_read( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, read_buf: &mut ReadBuf<'_>, ) -> Poll> { - let mut this = self.project(); loop { - match this.state { + match self.as_mut().project().state { ReadState::Ready(ref mut chunk, ref mut pos) => { let chunk_start = *pos; let buf = read_buf.initialize_unfilled(); @@ -237,12 +226,14 @@ where } } - ReadState::NotReady => match this.stream.as_mut().poll_next(cx) { - Poll::Ready(Some(Ok(chunk))) => { - *this.state = ReadState::Ready(chunk, 0); + ReadState::NotReady => match self.as_mut().project().stream.poll_frame(cx) { + Poll::Ready(Some(Ok(frame))) if frame.is_data() => { + *self.as_mut().project().state = + ReadState::Ready(frame.into_data().unwrap(), 0); continue; } + Poll::Ready(Some(Ok(_frame))) => return Poll::Ready(Ok(())), Poll::Ready(None) => return Poll::Ready(Ok(())), Poll::Pending => { return Poll::Pending; @@ -256,13 +247,92 @@ where }, } - *this.state = ReadState::NotReady; + *self.as_mut().project().state = ReadState::NotReady; return Poll::Ready(Ok(())); } } } +pin_project! { + #[derive(Debug)] + pub(crate) struct AsyncUpgraded { + #[pin] + inner: Upgraded, + } +} + +impl AsyncUpgraded { + pub(crate) fn new(upgraded: Upgraded) -> Self { + Self { inner: upgraded } + } +} + +impl AsyncRead for AsyncUpgraded { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + read_buf: &mut ReadBuf<'_>, + ) -> Poll> { + let n = { + let mut hbuf = hyper::rt::ReadBuf::new(read_buf.initialize_unfilled()); + match hyper::rt::Read::poll_read(self.project().inner, cx, hbuf.unfilled()) { + Poll::Ready(Ok(())) => hbuf.filled().len(), + other => return other, + } + }; + read_buf.advance(n); + + Poll::Ready(Ok(())) + } +} + +impl AsyncWrite for AsyncUpgraded { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + hyper::rt::Write::poll_write(self.project().inner, cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + hyper::rt::Write::poll_flush(self.project().inner, cx) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + hyper::rt::Write::poll_shutdown(self.project().inner, cx) + } +} + +pin_project! { + #[derive(Debug)] + pub(crate) struct IncomingStream { + #[pin] + inner: Incoming, + } +} + +impl IncomingStream { + pub(crate) fn new(incoming: Incoming) -> Self { + Self { inner: incoming } + } +} + +impl Stream for IncomingStream { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match futures_util::ready!(self.as_mut().project().inner.poll_frame(cx)?) 
{ + Some(frame) => match frame.into_data() { + Ok(data) => Poll::Ready(Some(Ok(data))), + Err(_) => Poll::Ready(None), + }, + None => Poll::Ready(None), + } + } +} + #[cfg(test)] mod tests { use std::collections::HashMap; diff --git a/src/secret.rs b/src/secret.rs index 20081fd8..4ef98c43 100644 --- a/src/secret.rs +++ b/src/secret.rs @@ -4,9 +4,11 @@ pub use crate::models::*; use super::Docker; use crate::errors::Error; +use bytes::Bytes; use http::request::Builder; -use hyper::{Body, Method}; -use serde::ser::Serialize; +use http_body_util::Full; +use hyper::Method; +use serde_derive::Serialize; use std::{collections::HashMap, hash::Hash}; /// Parameters used in the [List Secret API](super::Docker::list_secrets()) @@ -35,7 +37,7 @@ use std::{collections::HashMap, hash::Hash}; #[derive(Debug, Clone, Default, PartialEq, Serialize)] pub struct ListSecretsOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// Filters to process on the secret list, encoded as JSON. Available filters: /// - `id`=`` a secret's ID @@ -104,7 +106,7 @@ impl Docker { options: Option>, ) -> Result, Error> where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/secrets"; @@ -112,7 +114,7 @@ impl Docker { url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -194,7 +196,7 @@ impl Docker { &url, Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -230,7 +232,7 @@ impl Docker { &url, Builder::new().method(Method::DELETE), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await diff --git a/src/service.rs b/src/service.rs index 9dd92be0..49ac7dbc 100644 --- a/src/service.rs +++ b/src/service.rs @@ -5,10 +5,12 @@ pub use crate::models::*; use super::Docker; use crate::auth::{base64_url_encode, DockerCredentials}; use crate::errors::Error; +use bytes::Bytes; use http::header::CONTENT_TYPE; use http::request::Builder; -use hyper::{Body, Method}; -use serde::ser::Serialize; +use http_body_util::Full; +use hyper::Method; +use serde_derive::Serialize; use std::{collections::HashMap, hash::Hash}; @@ -38,7 +40,7 @@ use std::{collections::HashMap, hash::Hash}; #[derive(Debug, Clone, Default, PartialEq, Serialize)] pub struct ListServicesOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// Filters to process on the service list, encoded as JSON. Available filters: /// - `id`=`` a services's ID @@ -155,7 +157,7 @@ impl Docker { options: Option>, ) -> Result, Error> where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/services"; @@ -163,7 +165,7 @@ impl Docker { url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -287,7 +289,7 @@ impl Docker { &url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -322,7 +324,7 @@ impl Docker { &url, Builder::new().method(Method::DELETE), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await diff --git a/src/system.rs b/src/system.rs index 481e1469..9ed50e17 100644 --- a/src/system.rs +++ b/src/system.rs @@ -1,9 +1,11 @@ //! 
System API: interface for interacting with the Docker server and/or Registry. +use bytes::Bytes; use futures_core::Stream; use http::request::Builder; -use hyper::{Body, Method}; -use serde::ser::Serialize; +use http_body_util::Full; +use hyper::Method; +use serde_derive::{Deserialize, Serialize}; use serde_json::value::Value; use std::collections::HashMap; @@ -117,7 +119,7 @@ pub struct VersionComponents { #[derive(Debug, Default, Clone, PartialEq, Serialize)] pub struct EventsOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// Show events created since this timestamp then stream new events. #[cfg(all(feature = "chrono", not(feature = "time")))] @@ -184,7 +186,7 @@ impl Docker { "/version", Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -212,7 +214,7 @@ impl Docker { "/info", Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -238,7 +240,7 @@ impl Docker { url, Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_string(req).await @@ -276,7 +278,7 @@ impl Docker { options: Option>, ) -> impl Stream> where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/events"; @@ -284,7 +286,7 @@ impl Docker { url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_stream(req) @@ -316,7 +318,7 @@ impl Docker { url, Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await diff --git a/src/uri.rs b/src/uri.rs index eca1ba19..1b98cdf7 100644 --- a/src/uri.rs +++ b/src/uri.rs @@ -1,6 +1,7 @@ #[cfg(windows)] use hex::FromHex; use hyper::Uri as HyperUri; +use log::trace; use url::Url; use std::borrow::Cow; diff --git a/src/volume.rs b/src/volume.rs index 9361505a..0fed8a64 100644 --- a/src/volume.rs +++ b/src/volume.rs @@ -1,8 +1,10 @@ //! Volume API: Create and manage persistent storage that can be attached to containers. +use bytes::Bytes; use http::request::Builder; -use hyper::{Body, Method}; -use serde::Serialize; +use http_body_util::Full; +use hyper::Method; +use serde_derive::{Deserialize, Serialize}; use std::cmp::Eq; use std::collections::HashMap; @@ -16,7 +18,7 @@ use crate::models::*; #[derive(Debug, Clone, Default, PartialEq, Serialize)] pub struct ListVolumesOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: /// - `dangling=` When set to `true` (or `1`), returns all volumes that are not in use by a container. When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. @@ -33,7 +35,7 @@ where #[serde(rename_all = "PascalCase")] pub struct CreateVolumeOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// The new volume's name. If not specified, Docker generates a name. pub name: T, @@ -82,7 +84,7 @@ pub struct RemoveVolumeOptions { #[derive(Debug, Clone, Default, PartialEq, Serialize)] pub struct PruneVolumesOptions where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { /// Filters to process on the prune list, encoded as JSON. 
/// - `label` (`label=`, `label==`, `label!=`, or @@ -130,7 +132,7 @@ impl Docker { options: Option>, ) -> Result where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/volumes"; @@ -138,7 +140,7 @@ impl Docker { url, Builder::new().method(Method::GET), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -178,7 +180,7 @@ impl Docker { /// ``` pub async fn create_volume(&self, config: CreateVolumeOptions) -> Result where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/volumes/create"; @@ -220,7 +222,7 @@ impl Docker { &url, Builder::new().method(Method::GET), None::, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await @@ -267,7 +269,7 @@ impl Docker { &url, Builder::new().method(Method::DELETE), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_unit(req).await @@ -311,7 +313,7 @@ impl Docker { options: Option>, ) -> Result where - T: Into + Eq + Hash + Serialize, + T: Into + Eq + Hash + serde::ser::Serialize, { let url = "/volumes/prune"; @@ -319,7 +321,7 @@ impl Docker { url, Builder::new().method(Method::POST), options, - Ok(Body::empty()), + Ok(Full::new(Bytes::new())), ); self.process_into_value(req).await diff --git a/tests/export_test.rs b/tests/export_test.rs index 1df75928..a1079956 100644 --- a/tests/export_test.rs +++ b/tests/export_test.rs @@ -45,7 +45,7 @@ async fn export_buildkit_oci_test(mut docker: Docker) -> Result<(), Error> { // cleanup - usually for local testing, the grpc handler will overwrite if dest_path.exists() { - std::fs::remove_file(&dest_path).unwrap(); + std::fs::remove_file(dest_path).unwrap(); } assert!(!dest_path.exists()); @@ -53,7 +53,7 @@ async fn export_buildkit_oci_test(mut docker: Docker) -> Result<(), Error> { "docker.io/library/bollard-oci-export-buildkit-example:latest", ) .annotation("exporter", "Bollard") - .dest(&dest_path); + .dest(dest_path); let buildkit_builder = DockerContainerBuilder::new("bollard_export_test_export_oci_image", &docker, session_id); @@ -85,13 +85,13 @@ async fn export_buildkit_oci_test(mut docker: Docker) -> Result<(), Error> { assert!(dest_path.exists()); - let oci_file = std::fs::File::open(&dest_path)?; + let oci_file = std::fs::File::open(dest_path)?; let mut oci_archive = tar::Archive::new(oci_file); let mut paths = vec![]; - let mut iter = oci_archive.entries()?; - while let Some(entry) = iter.next() { + let iter = oci_archive.entries()?; + for entry in iter { let entry = entry?; let path = entry.path()?.display().to_string(); paths.push(path); diff --git a/tests/image_test.rs b/tests/image_test.rs index 168a5374..6c0b7cea 100644 --- a/tests/image_test.rs +++ b/tests/image_test.rs @@ -1,5 +1,6 @@ #![type_length_limit = "2097152"] +use bytes::BufMut; use futures_util::future::ready; use futures_util::stream::{StreamExt, TryStreamExt}; use tokio::runtime::Runtime; @@ -36,7 +37,7 @@ async fn create_image_wasm_test(docker: Docker) -> Result<(), Error> { ..Default::default() }; - let req_body = hyper::body::Body::from({ + let req_body = bytes::Bytes::from({ let mut buffer = Vec::new(); { @@ -476,7 +477,7 @@ ENTRYPOINT ls buildkit-bollard.txt .await?; assert!(build - .into_iter() + .iter() .flat_map(|build_info| { if let Some(aux) = &build_info.aux { match aux { @@ -562,7 +563,6 @@ ENTRYPOINT ls buildkit-bollard.txt c.write_all(&uncompressed).unwrap(); let compressed = c.finish().unwrap(); - let id = 
"build_buildkit_image_test"; let build = &docker .build_image( BuildImageOptions { @@ -728,30 +728,13 @@ async fn import_image_test(docker: Docker) -> Result<(), Error> { } else { format!("{}hello-world:linux", registry_http_addr()) }; - let temp_file = if cfg!(windows) { - "C:\\Users\\appveyor\\Appdata\\Local\\Temp\\bollard_test_import_image.tar" - } else { - "/tmp/bollard_test_import_image.tar" - }; let mut res = docker.export_image(&image); - use tokio::io::AsyncWriteExt; - use tokio_util::codec; - let mut archive_file = tokio::fs::File::create(temp_file).await?; + let mut buf = bytes::BytesMut::new(); while let Some(data) = res.next().await { - archive_file.write_all(&data.unwrap()).await?; - archive_file.sync_all().await?; + buf.put_slice(&data.unwrap()); } - drop(archive_file); - - let archive_file = tokio::fs::File::open(temp_file).await?; - let byte_stream = codec::FramedRead::new(archive_file, codec::BytesCodec::new()).map(|r| { - let bytes = r.unwrap().freeze(); - Ok::<_, Error>(bytes) - }); - - let body = hyper::Body::wrap_stream(byte_stream); let mut creds = HashMap::new(); creds.insert( @@ -764,7 +747,7 @@ async fn import_image_test(docker: Docker) -> Result<(), Error> { ImportImageOptions { ..Default::default() }, - body, + buf.freeze(), Some(creds), ) .try_collect::>() diff --git a/tests/version_test.rs b/tests/version_test.rs index 963d3cea..93a0b28a 100644 --- a/tests/version_test.rs +++ b/tests/version_test.rs @@ -20,6 +20,7 @@ fn test_version_named_pipe() { #[cfg(all(unix, not(feature = "test_http")))] #[test] +#[allow(clippy::redundant_closure_call)] fn test_version_unix() { rt_exec!( Docker::connect_with_unix_defaults().unwrap().version(), From dc4ed14474c02e5dcc1ddeef0eb20728e7156f9e Mon Sep 17 00:00:00 2001 From: Niel Drummond Date: Wed, 24 Jan 2024 09:24:36 +0000 Subject: [PATCH 2/2] chore(models): Bump bollard-stubs to use API 1.44 --- Cargo.toml | 2 +- README.md | 4 +- codegen/swagger/Cargo.toml | 2 +- codegen/swagger/README.md | 6 +- codegen/swagger/pom.xml | 4 +- codegen/swagger/src/lib.rs | 2 +- codegen/swagger/src/models.rs | 267 +++++++++++++++++++++++++++------- src/lib.rs | 4 +- 8 files changed, 229 insertions(+), 62 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 65162a66..ea1e30b1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ time = ["dep:time", "bollard-stubs/time"] [dependencies] base64 = "0.21" -bollard-stubs = { path = "codegen/swagger", version = "=1.43.0-rc.3", default-features = false } +bollard-stubs = { path = "codegen/swagger", version = "=1.44.0-rc.1", default-features = false } bollard-buildkit-proto = { path = "codegen/proto", version = "=0.2.1", optional = true } bytes = "1" chrono = { version = "0.4", default-features = false, features = ["std", "clock", "serde"], optional = true } diff --git a/README.md b/README.md index 48536217..0ed01c86 100644 --- a/README.md +++ b/README.md @@ -53,8 +53,8 @@ encouraged. ### Version -The [Docker API](https://docs.docker.com/engine/api/v1.43/) used by Bollard is using the latest -`1.43` documentation schema published by the [moby](https://github.com/moby/moby) project to +The [Docker API](https://docs.docker.com/engine/api/v1.44/) used by Bollard is using the latest +`1.44` documentation schema published by the [moby](https://github.com/moby/moby) project to generate its serialization interface. 
This library also supports [version diff --git a/codegen/swagger/Cargo.toml b/codegen/swagger/Cargo.toml index b3a07952..0ae7dbad 100644 --- a/codegen/swagger/Cargo.toml +++ b/codegen/swagger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bollard-stubs" -version = "1.43.0-rc.3" +version = "1.44.0-rc.1" authors = [ "Bollard contributors" ] description = "Stubs used for the Bollard rust async Docker client API" license = "Apache-2.0" diff --git a/codegen/swagger/README.md b/codegen/swagger/README.md index 61b7faf8..3e532b8f 100644 --- a/codegen/swagger/README.md +++ b/codegen/swagger/README.md @@ -7,9 +7,9 @@ To see how to make this your own, look here: [README](https://github.com/swagger-api/swagger-codegen/blob/master/README.md) -- API version: 1.43.0-rc.3 -- Code generation suffix: 1.43.0-rc.3 -- Build date: 2024-01-15T11:26:03.143Z +- API version: 1.44.0-rc.1 +- Code generation suffix: 1.44.0-rc.1 +- Build date: 2024-01-24T09:26:30.020Z This autogenerated project defines an API crate `bollard-stubs` which contains: * Data types representing the underlying data model. diff --git a/codegen/swagger/pom.xml b/codegen/swagger/pom.xml index 17ffe2a3..75ba5a6e 100644 --- a/codegen/swagger/pom.xml +++ b/codegen/swagger/pom.xml @@ -19,7 +19,7 @@ generate - https://raw.githubusercontent.com/moby/moby/v24.0.0/docs/api/v1.43.yaml + https://raw.githubusercontent.com/moby/moby/v25.0.0/docs/api/v1.44.yaml bollard.BollardCodegen ${project.basedir} true @@ -27,7 +27,7 @@ models.rs,lib.rs,Cargo.toml,config,README.md bollard-stubs - 1.43.0-rc.3 + 1.44.0-rc.1 diff --git a/codegen/swagger/src/lib.rs b/codegen/swagger/src/lib.rs index a5627781..95c5dfe8 100644 --- a/codegen/swagger/src/lib.rs +++ b/codegen/swagger/src/lib.rs @@ -13,7 +13,7 @@ use std::io::Error; #[allow(unused_imports)] use std::collections::HashMap; -pub const BASE_PATH: &str = "/v1.43"; +pub const BASE_PATH: &str = "/v1.44"; pub mod models; diff --git a/codegen/swagger/src/models.rs b/codegen/swagger/src/models.rs index 11f8a330..adb3bc31 100644 --- a/codegen/swagger/src/models.rs +++ b/codegen/swagger/src/models.rs @@ -979,7 +979,7 @@ pub struct ContainerConfig { #[serde(skip_serializing_if = "Option::is_none")] pub network_disabled: Option, - /// MAC address of the container. + /// MAC address of the container. Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. #[serde(rename = "MacAddress")] #[serde(skip_serializing_if = "Option::is_none")] pub mac_address: Option, @@ -1282,6 +1282,23 @@ impl ::std::convert::AsRef for ContainerStateStatusEnum { } } +/// represents the status of a container. +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct ContainerStatus { + #[serde(rename = "ContainerID")] + #[serde(skip_serializing_if = "Option::is_none")] + pub container_id: Option, + + #[serde(rename = "PID")] + #[serde(skip_serializing_if = "Option::is_none")] + pub pid: Option, + + #[serde(rename = "ExitCode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub exit_code: Option, + +} + #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] pub struct ContainerSummary { /// The ID of this container @@ -1669,6 +1686,11 @@ pub struct EndpointSettings { #[serde(skip_serializing_if = "Option::is_none")] pub links: Option>, + /// MAC address for the endpoint on this network. The network driver might ignore this parameter. 
+ #[serde(rename = "MacAddress")] + #[serde(skip_serializing_if = "Option::is_none")] + pub mac_address: Option, + #[serde(rename = "Aliases")] #[serde(skip_serializing_if = "Option::is_none")] pub aliases: Option>, @@ -1713,16 +1735,16 @@ pub struct EndpointSettings { #[serde(skip_serializing_if = "Option::is_none")] pub global_ipv6_prefix_len: Option, - /// MAC address for the endpoint on this network. - #[serde(rename = "MacAddress")] - #[serde(skip_serializing_if = "Option::is_none")] - pub mac_address: Option, - /// DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. #[serde(rename = "DriverOpts")] #[serde(skip_serializing_if = "Option::is_none")] pub driver_opts: Option>, + /// List of all DNS names an endpoint has on a specific network. This list is based on the container name, network aliases, container short ID, and hostname. These DNS names are non-fully qualified but can contain several dots. You can get fully qualified DNS names by appending `.`. For instance, if container name is `my.ctr` and the network is named `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be `my.ctr.testnet`. + #[serde(rename = "DNSNames")] + #[serde(skip_serializing_if = "Option::is_none")] + pub dns_names: Option>, + } /// Properties that can be configured to access and load balance a service. @@ -2311,6 +2333,11 @@ pub struct HealthConfig { #[serde(skip_serializing_if = "Option::is_none")] pub start_period: Option, + /// The time to wait between checks in nanoseconds during the start period. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + #[serde(rename = "StartInterval")] + #[serde(skip_serializing_if = "Option::is_none")] + pub start_interval: Option, + } /// HealthcheckResult stores information about a single run of a healthcheck probe @@ -2896,11 +2923,12 @@ pub struct ImageInspect { #[serde(skip_serializing_if = "Option::is_none")] pub created: Option, - /// The ID of the container that was used to create the image. Depending on how the image was created, this field may be empty. + /// The ID of the container that was used to create the image. Depending on how the image was created, this field may be empty. **Deprecated**: this field is kept for backward compatibility, but will be removed in API v1.45. #[serde(rename = "Container")] #[serde(skip_serializing_if = "Option::is_none")] pub container: Option, + /// **Deprecated**: this field is kept for backward compatibility, but will be removed in API v1.45. #[serde(rename = "ContainerConfig")] #[serde(skip_serializing_if = "Option::is_none")] pub container_config: Option, @@ -2944,7 +2972,7 @@ pub struct ImageInspect { #[serde(skip_serializing_if = "Option::is_none")] pub size: Option, - /// Total size of the image including all layers it is composed of. In versions of Docker before v1.10, this field was calculated from the image itself and all of its parent images. Images are now stored self-contained, and no longer use a parent-chain, making this field an equivalent of the Size field. > **Deprecated**: this field is kept for backward compatibility, but > will be removed in API v1.44. + /// Total size of the image including all layers it is composed of. Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. 
#[serde(rename = "VirtualSize")] #[serde(skip_serializing_if = "Option::is_none")] pub virtual_size: Option, @@ -3014,6 +3042,7 @@ pub struct ImageSearchResponseItem { #[serde(skip_serializing_if = "Option::is_none")] pub is_official: Option, + /// Whether this repository has automated builds enabled.


> **Deprecated**: This field is deprecated and will always > be \"false\" in future. #[serde(rename = "is_automated")] #[serde(skip_serializing_if = "Option::is_none")] pub is_automated: Option, @@ -3060,7 +3089,7 @@ pub struct ImageSummary { #[serde(rename = "SharedSize")] pub shared_size: i64, - /// Total size of the image including all layers it is composed of. In versions of Docker before v1.10, this field was calculated from the image itself and all of its parent images. Images are now stored self-contained, and no longer use a parent-chain, making this field an equivalent of the Size field. Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + /// Total size of the image including all layers it is composed of. Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. #[serde(rename = "VirtualSize")] #[serde(skip_serializing_if = "Option::is_none")] pub virtual_size: Option, @@ -3365,6 +3394,16 @@ pub struct MountBindOptions { #[serde(skip_serializing_if = "Option::is_none")] pub create_mountpoint: Option, + /// Make the mount non-recursively read-only, but still leave the mount recursive (unless NonRecursive is set to true in conjunction). + #[serde(rename = "ReadOnlyNonRecursive")] + #[serde(skip_serializing_if = "Option::is_none")] + pub read_only_non_recursive: Option, + + /// Raise an error if the mount cannot be made recursively read-only. + #[serde(rename = "ReadOnlyForceRecursive")] + #[serde(skip_serializing_if = "Option::is_none")] + pub read_only_force_recursive: Option, + } #[allow(non_camel_case_types)] @@ -3708,7 +3747,7 @@ pub struct NetworkCreateRequest { #[serde(rename = "Name")] pub name: String, - /// Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions. + /// Deprecated: CheckDuplicate is now always enabled. #[serde(rename = "CheckDuplicate")] #[serde(skip_serializing_if = "Option::is_none")] pub check_duplicate: Option, @@ -3794,7 +3833,7 @@ pub struct NetworkPruneResponse { /// NetworkSettings exposes the network settings in the API #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] pub struct NetworkSettings { - /// Name of the network's bridge (for example, `docker0`). + /// Name of the default bridge interface when dockerd's --bridge flag is set. #[serde(rename = "Bridge")] #[serde(skip_serializing_if = "Option::is_none")] pub bridge: Option, @@ -3804,17 +3843,17 @@ pub struct NetworkSettings { #[serde(skip_serializing_if = "Option::is_none")] pub sandbox_id: Option, - /// Indicates if hairpin NAT should be enabled on the virtual interface. + /// Indicates if hairpin NAT should be enabled on the virtual interface. Deprecated: This field is never set and will be removed in a future release. #[serde(rename = "HairpinMode")] #[serde(skip_serializing_if = "Option::is_none")] pub hairpin_mode: Option, - /// IPv6 unicast address using the link-local prefix. + /// IPv6 unicast address using the link-local prefix. Deprecated: This field is never set and will be removed in a future release. 
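With `CheckDuplicate` retired, callers can simply leave it unset; a minimal sketch (the network name `my-network` is illustrative):

    use bollard::network::CreateNetworkOptions;
    use bollard::Docker;

    async fn create_network_example() -> Result<(), bollard::errors::Error> {
        let docker = Docker::connect_with_local_defaults()?;
        // The daemon now always rejects duplicate names, so no
        // `check_duplicate` flag is needed.
        let response = docker
            .create_network(CreateNetworkOptions {
                name: "my-network",
                ..Default::default()
            })
            .await?;
        println!("created network: {:?}", response);
        Ok(())
    }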
#[serde(rename = "LinkLocalIPv6Address")] #[serde(skip_serializing_if = "Option::is_none")] pub link_local_ipv6_address: Option, - /// Prefix length of the IPv6 unicast address. + /// Prefix length of the IPv6 unicast address. Deprecated: This field is never set and will be removed in a future release. #[serde(rename = "LinkLocalIPv6PrefixLen")] #[serde(skip_serializing_if = "Option::is_none")] pub link_local_ipv6_prefix_len: Option, @@ -3823,17 +3862,17 @@ pub struct NetworkSettings { #[serde(skip_serializing_if = "Option::is_none")] pub ports: Option, - /// SandboxKey identifies the sandbox + /// SandboxKey is the full path of the netns handle #[serde(rename = "SandboxKey")] #[serde(skip_serializing_if = "Option::is_none")] pub sandbox_key: Option, - /// + /// Deprecated: This field is never set and will be removed in a future release. #[serde(rename = "SecondaryIPAddresses")] #[serde(skip_serializing_if = "Option::is_none")] pub secondary_ip_addresses: Option>, - /// + /// Deprecated: This field is never set and will be removed in a future release. #[serde(rename = "SecondaryIPv6Addresses")] #[serde(skip_serializing_if = "Option::is_none")] pub secondary_ipv6_addresses: Option>, @@ -3888,7 +3927,7 @@ pub struct NetworkSettings { /// NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] pub struct NetworkingConfig { - /// A mapping of network name to endpoint configuration for that network. + /// A mapping of network name to endpoint configuration for that network. The endpoint configuration can be left empty to connect to that network with no particular endpoint configuration. #[serde(rename = "EndpointsConfig")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoints_config: Option>, @@ -4676,6 +4715,15 @@ pub struct PortBinding { // special-casing PortMap, cos swagger-codegen doesn't figure out this type pub type PortMap = HashMap>>; +/// represents the port status of a task's host ports whose service has published host ports +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct PortStatus { + #[serde(rename = "Ports")] + #[serde(skip_serializing_if = "Option::is_none")] + pub ports: Option>, + +} + #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] pub struct ProcessConfig { #[serde(rename = "privileged")] @@ -5095,6 +5143,11 @@ pub struct Runtime { #[serde(skip_serializing_if = "Option::is_none")] pub runtime_args: Option>, + /// Information specific to the runtime. While this API specification does not define data provided by runtimes, the following well-known properties may be provided by runtimes: `org.opencontainers.runtime-spec.features`: features structure as defined in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), in a JSON string representation.


> **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. + #[serde(rename = "status")] + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option>, + } #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] @@ -5210,6 +5263,7 @@ pub struct Service { } +/// contains the information returned to a client on the creation of a new service. #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] pub struct ServiceCreateResponse { /// The ID of the created service. @@ -5217,10 +5271,10 @@ pub struct ServiceCreateResponse { #[serde(skip_serializing_if = "Option::is_none")] pub id: Option, - /// Optional warning message - #[serde(rename = "Warning")] + /// Optional warning message. FIXME(thaJeztah): this should have \"omitempty\" in the generated type. + #[serde(rename = "Warnings")] #[serde(skip_serializing_if = "Option::is_none")] - pub warning: Option, + pub warnings: Option>, } @@ -5321,7 +5375,7 @@ pub struct ServiceSpec { #[serde(skip_serializing_if = "Option::is_none")] pub rollback_config: Option, - /// Specifies which networks the service should attach to. + /// Specifies which networks the service should attach to. Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. #[serde(rename = "Networks")] #[serde(skip_serializing_if = "Option::is_none")] pub networks: Option>, @@ -6343,12 +6397,12 @@ pub struct SystemInfo { #[serde(skip_serializing_if = "Option::is_none")] pub os_version: Option, - /// Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are \"linux\" and \"windows\". A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + /// Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are \"linux\" and \"windows\". A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). #[serde(rename = "OSType")] #[serde(skip_serializing_if = "Option::is_none")] pub os_type: Option, - /// Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + /// Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). #[serde(rename = "Architecture")] #[serde(skip_serializing_if = "Option::is_none")] pub architecture: Option, @@ -6406,21 +6460,11 @@ pub struct SystemInfo { #[serde(skip_serializing_if = "Option::is_none")] pub experimental_build: Option, - /// Version string of the daemon. > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) > returns the Swarm version instead of the daemon version, for example > `swarm/1.2.8`. + /// Version string of the daemon. #[serde(rename = "ServerVersion")] #[serde(skip_serializing_if = "Option::is_none")] pub server_version: Option, - /// URL of the distributed storage backend. The storage backend is used for multihost networking (to store network and endpoint information) and by the node discovery mechanism.
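Since the single `warning` string became a `warnings` list, callers now iterate; a sketch, assuming bollard's `create_service` signature is otherwise unchanged:

    use bollard::models::ServiceSpec;
    use bollard::Docker;

    async fn create_service_logging_warnings(spec: ServiceSpec) -> Result<(), bollard::errors::Error> {
        let docker = Docker::connect_with_local_defaults()?;
        let response = docker.create_service(spec, None).await?;
        // `warning: Option<String>` is now `warnings: Option<Vec<String>>`.
        for warning in response.warnings.unwrap_or_default() {
            eprintln!("service create warning: {warning}");
        }
        Ok(())
    }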


> **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. - #[serde(rename = "ClusterStore")] - #[serde(skip_serializing_if = "Option::is_none")] - pub cluster_store: Option, - - /// The network endpoint that the Engine advertises for the purpose of node discovery. ClusterAdvertise is a `host:port` combination on which the daemon is reachable by other hosts.
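Both the new `cdi_spec_dirs` and the existing `runtimes` map can be read straight off the `/info` response; a sketch, assuming a daemon speaking API v1.44:

    use bollard::Docker;

    async fn inspect_daemon_features() -> Result<(), bollard::errors::Error> {
        let docker = Docker::connect_with_local_defaults()?;
        let info = docker.info().await?;
        if let Some(dirs) = info.cdi_spec_dirs {
            println!("CDI spec directories: {:?}", dirs);
        }
        for name in info.runtimes.unwrap_or_default().keys() {
            println!("configured OCI runtime: {}", name);
        }
        Ok(())
    }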


> **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. - #[serde(rename = "ClusterAdvertise")] - #[serde(skip_serializing_if = "Option::is_none")] - pub cluster_advertise: Option, - /// List of [OCI compliant](https://github.com/opencontainers/runtime-spec) runtimes configured on the daemon. Keys hold the \"name\" used to reference the runtime. The Docker daemon relies on an OCI compliant runtime (invoked via the `containerd` daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux. The default runtime is `runc`, and automatically configured. Additional runtimes can be configured by the user and will be listed here. #[serde(rename = "Runtimes")] #[serde(skip_serializing_if = "Option::is_none")] @@ -6482,6 +6526,11 @@ pub struct SystemInfo { #[serde(skip_serializing_if = "Option::is_none")] pub warnings: Option>, + /// List of directories where (Container Device Interface) CDI specifications are located. These specifications define vendor-specific modifications to an OCI runtime specification for a container being created. An empty list indicates that CDI device injection is disabled. Note that since using CDI device injection requires the daemon to have experimental enabled. For non-experimental daemons an empty list will always be returned. + #[serde(rename = "CDISpecDirs")] + #[serde(skip_serializing_if = "Option::is_none")] + pub cdi_spec_dirs: Option>, + } #[allow(non_camel_case_types)] @@ -7142,6 +7191,72 @@ pub struct TaskSpecContainerSpecPrivileges { #[serde(skip_serializing_if = "Option::is_none")] pub se_linux_context: Option, + #[serde(rename = "Seccomp")] + #[serde(skip_serializing_if = "Option::is_none")] + pub seccomp: Option, + + #[serde(rename = "AppArmor")] + #[serde(skip_serializing_if = "Option::is_none")] + pub app_armor: Option, + + /// Configuration of the no_new_privs bit in the container + #[serde(rename = "NoNewPrivileges")] + #[serde(skip_serializing_if = "Option::is_none")] + pub no_new_privileges: Option, + +} + +/// Options for configuring AppArmor on the container +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct TaskSpecContainerSpecPrivilegesAppArmor { + #[serde(rename = "Mode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub mode: Option, + +} + +#[allow(non_camel_case_types)] +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Serialize, Deserialize, Eq, Ord)] +pub enum TaskSpecContainerSpecPrivilegesAppArmorModeEnum { + #[serde(rename = "")] + EMPTY, + #[serde(rename = "default")] + DEFAULT, + #[serde(rename = "disabled")] + DISABLED, +} + +impl ::std::fmt::Display for TaskSpecContainerSpecPrivilegesAppArmorModeEnum { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + match *self { + TaskSpecContainerSpecPrivilegesAppArmorModeEnum::EMPTY => write!(f, ""), + TaskSpecContainerSpecPrivilegesAppArmorModeEnum::DEFAULT => write!(f, "{}", "default"), + TaskSpecContainerSpecPrivilegesAppArmorModeEnum::DISABLED => write!(f, "{}", "disabled"), + + } + } +} + +impl ::std::str::FromStr for TaskSpecContainerSpecPrivilegesAppArmorModeEnum { + type Err = String; + fn from_str(s: &str) -> Result { + match s { + "" => Ok(TaskSpecContainerSpecPrivilegesAppArmorModeEnum::EMPTY), + "default" => Ok(TaskSpecContainerSpecPrivilegesAppArmorModeEnum::DEFAULT), + "disabled" => 
Ok(TaskSpecContainerSpecPrivilegesAppArmorModeEnum::DISABLED), + x => Err(format!("Invalid enum type: {}", x)), + } + } +} + +impl ::std::convert::AsRef for TaskSpecContainerSpecPrivilegesAppArmorModeEnum { + fn as_ref(&self) -> &str { + match self { + TaskSpecContainerSpecPrivilegesAppArmorModeEnum::EMPTY => "", + TaskSpecContainerSpecPrivilegesAppArmorModeEnum::DEFAULT => "default", + TaskSpecContainerSpecPrivilegesAppArmorModeEnum::DISABLED => "disabled", + } + } } /// CredentialSpec for managed service account (Windows only) @@ -7194,6 +7309,69 @@ pub struct TaskSpecContainerSpecPrivilegesSeLinuxContext { } +/// Options for configuring seccomp on the container +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct TaskSpecContainerSpecPrivilegesSeccomp { + #[serde(rename = "Mode")] + #[serde(skip_serializing_if = "Option::is_none")] + pub mode: Option, + + /// The custom seccomp profile as a json object + #[serde(rename = "Profile")] + #[serde(skip_serializing_if = "Option::is_none")] + pub profile: Option, + +} + +#[allow(non_camel_case_types)] +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Serialize, Deserialize, Eq, Ord)] +pub enum TaskSpecContainerSpecPrivilegesSeccompModeEnum { + #[serde(rename = "")] + EMPTY, + #[serde(rename = "default")] + DEFAULT, + #[serde(rename = "unconfined")] + UNCONFINED, + #[serde(rename = "custom")] + CUSTOM, +} + +impl ::std::fmt::Display for TaskSpecContainerSpecPrivilegesSeccompModeEnum { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + match *self { + TaskSpecContainerSpecPrivilegesSeccompModeEnum::EMPTY => write!(f, ""), + TaskSpecContainerSpecPrivilegesSeccompModeEnum::DEFAULT => write!(f, "{}", "default"), + TaskSpecContainerSpecPrivilegesSeccompModeEnum::UNCONFINED => write!(f, "{}", "unconfined"), + TaskSpecContainerSpecPrivilegesSeccompModeEnum::CUSTOM => write!(f, "{}", "custom"), + + } + } +} + +impl ::std::str::FromStr for TaskSpecContainerSpecPrivilegesSeccompModeEnum { + type Err = String; + fn from_str(s: &str) -> Result { + match s { + "" => Ok(TaskSpecContainerSpecPrivilegesSeccompModeEnum::EMPTY), + "default" => Ok(TaskSpecContainerSpecPrivilegesSeccompModeEnum::DEFAULT), + "unconfined" => Ok(TaskSpecContainerSpecPrivilegesSeccompModeEnum::UNCONFINED), + "custom" => Ok(TaskSpecContainerSpecPrivilegesSeccompModeEnum::CUSTOM), + x => Err(format!("Invalid enum type: {}", x)), + } + } +} + +impl ::std::convert::AsRef for TaskSpecContainerSpecPrivilegesSeccompModeEnum { + fn as_ref(&self) -> &str { + match self { + TaskSpecContainerSpecPrivilegesSeccompModeEnum::EMPTY => "", + TaskSpecContainerSpecPrivilegesSeccompModeEnum::DEFAULT => "default", + TaskSpecContainerSpecPrivilegesSeccompModeEnum::UNCONFINED => "unconfined", + TaskSpecContainerSpecPrivilegesSeccompModeEnum::CUSTOM => "custom", + } + } +} + #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] pub struct TaskSpecContainerSpecSecrets { #[serde(rename = "File")] @@ -7480,6 +7658,7 @@ impl std::default::Default for TaskState { } } +/// represents the status of a task. 
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] pub struct TaskStatus { #[serde(rename = "Timestamp")] @@ -7505,23 +7684,11 @@ pub struct TaskStatus { #[serde(rename = "ContainerStatus")] #[serde(skip_serializing_if = "Option::is_none")] - pub container_status: Option, + pub container_status: Option, -} - -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] -pub struct TaskStatusContainerStatus { - #[serde(rename = "ContainerID")] + #[serde(rename = "PortStatus")] #[serde(skip_serializing_if = "Option::is_none")] - pub container_id: Option, - - #[serde(rename = "PID")] - #[serde(skip_serializing_if = "Option::is_none")] - pub pid: Option, - - #[serde(rename = "ExitCode")] - #[serde(skip_serializing_if = "Option::is_none")] - pub exit_code: Option, + pub port_status: Option, } diff --git a/src/lib.rs b/src/lib.rs index 98bb5546..bb61a49e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -53,8 +53,8 @@ //! //! ## Version //! -//! The [Docker API](https://docs.docker.com/engine/api/v1.43/) used by Bollard is using the latest -//! `1.43` documentation schema published by the [moby](https://github.com/moby/moby) project to +//! The [Docker API](https://docs.docker.com/engine/api/v1.44/) used by Bollard is using the latest +//! `1.44` documentation schema published by the [moby](https://github.com/moby/moby) project to //! generate its serialization interface. //! //! This library also supports [version