diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a950428ec..b3a0fe0c3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,90 +17,95 @@ jobs: name: rustfmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: sfackler/actions/rustup@master - - uses: sfackler/actions/rustfmt@master + - uses: actions/checkout@v6 + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt + - run: cargo fmt --all -- --check clippy: name: clippy runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: sfackler/actions/rustup@master + - uses: actions/checkout@v6 + - uses: dtolnay/rust-toolchain@stable + with: + components: clippy - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cargo/registry/index key: index-${{ runner.os }}-${{ github.run_number }} restore-keys: | index-${{ runner.os }}- - run: cargo generate-lockfile - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cargo/registry/cache key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} - run: cargo fetch - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: target - key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y + key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} - run: cargo clippy --all --all-targets check-wasm32: name: check-wasm32 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: sfackler/actions/rustup@master + - uses: actions/checkout@v6 + - uses: dtolnay/rust-toolchain@stable + with: + targets: wasm32-unknown-unknown - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - - run: rustup target add wasm32-unknown-unknown - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: 
~/.cargo/registry/index key: index-${{ runner.os }}-${{ github.run_number }} restore-keys: | index-${{ runner.os }}- - run: cargo generate-lockfile - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cargo/registry/cache key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} - run: cargo fetch - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: target key: check-wasm32-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} - run: cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features --features js + env: + RUSTFLAGS: --cfg getrandom_backend="wasm_js" test: name: test runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v6 - run: docker compose up -d - - uses: sfackler/actions/rustup@master - with: - version: 1.77.0 + - uses: dtolnay/rust-toolchain@1.81.0 - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cargo/registry/index key: index-${{ runner.os }}-${{ github.run_number }} restore-keys: | index-${{ runner.os }}- - run: cargo generate-lockfile - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cargo/registry/cache key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} - run: cargo fetch - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: target - key: test-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y + key: test-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} - run: cargo test --all - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features diff --git a/codegen/Cargo.toml 
b/codegen/Cargo.toml index bbe6b789c..976eae6d3 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -phf_codegen = "0.11" +phf_codegen = "0.13" regex = "1.0" marksman_escape = "0.1" linked-hash-map = "0.5" diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index d21b92eec..5cc9e61fe 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -72,7 +72,6 @@ fn make_code(codes: &LinkedHashMap>, file: &mut BufWriter "{code}","#, - code = code, ) .unwrap(); } @@ -97,8 +96,6 @@ fn make_consts(codes: &LinkedHashMap>, file: &mut BufWriter< /// {code} pub const {name}: SqlState = SqlState(Inner::E{code}); "#, - name = name, - code = code, ) .unwrap(); } @@ -121,8 +118,7 @@ enum Inner {{"#, write!( file, r#" - E{},"#, - code, + E{code},"#, ) .unwrap(); } @@ -139,7 +135,7 @@ enum Inner {{"#, fn make_map(codes: &LinkedHashMap>, file: &mut BufWriter) { let mut builder = phf_codegen::Map::new(); for (code, names) in codes { - builder.entry(&**code, &format!("SqlState::{}", &names[0])); + builder.entry(&**code, format!("SqlState::{}", &names[0])); } write!( file, diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index fd7a56450..1b80a5101 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -237,7 +237,7 @@ fn parse_types() -> BTreeMap { let doc_name = array_re.replace(&name, "$1[]").to_ascii_uppercase(); let mut doc = doc_name.clone(); if let Some(descr) = raw_type.get("descr") { - write!(doc, " - {}", descr).unwrap(); + write!(doc, " - {descr}").unwrap(); } let doc = Escape::new(doc.as_bytes().iter().cloned()).collect(); let doc = String::from_utf8(doc).unwrap(); @@ -245,10 +245,10 @@ fn parse_types() -> BTreeMap { if let Some(array_type_oid) = raw_type.get("array_type_oid") { let array_type_oid = array_type_oid.parse::().unwrap(); - let name = format!("_{}", name); - let variant = format!("{}Array", variant); - let doc = format!("{}[]", 
doc_name); - let ident = format!("{}_ARRAY", ident); + let name = format!("_{name}"); + let variant = format!("{variant}Array"); + let doc = format!("{doc_name}[]"); + let ident = format!("{ident}_ARRAY"); let type_ = Type { name, diff --git a/docker-compose.yml b/docker-compose.yml index 0ed44148d..05c943401 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,7 @@ version: '2' services: postgres: - image: postgres:14 + image: docker.io/postgres:18 ports: - 5433:5433 volumes: diff --git a/postgres-derive-test/Cargo.toml b/postgres-derive-test/Cargo.toml index 24fd1614f..ce597fe52 100644 --- a/postgres-derive-test/Cargo.toml +++ b/postgres-derive-test/Cargo.toml @@ -2,7 +2,7 @@ name = "postgres-derive-test" version = "0.1.0" authors = ["Steven Fackler "] -edition = "2018" +edition = "2021" [dev-dependencies] trybuild = "1.0" diff --git a/postgres-derive-test/src/domains.rs b/postgres-derive-test/src/domains.rs index 25674f75e..bee415b62 100644 --- a/postgres-derive-test/src/domains.rs +++ b/postgres-derive-test/src/domains.rs @@ -119,3 +119,36 @@ fn domain_in_composite() { )], ); } + +#[test] +fn composite_in_domain_in_composite() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "leaf_composite")] + struct LeafComposite { + prim: i32, + } + + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "domain")] + struct Domain(LeafComposite); + + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "root_composite")] + struct RootComposite { + domain: Domain, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute("CREATE TYPE leaf_composite AS (prim integer); CREATE DOMAIN domain AS leaf_composite; CREATE TYPE root_composite AS (domain domain);").unwrap(); + + test_type( + &mut conn, + "root_composite", + &[( + RootComposite { + domain: Domain(LeafComposite { prim: 1 }), + }, + "ROW(ROW(1))", + )], + ); +} diff --git a/postgres-derive-test/src/lib.rs 
b/postgres-derive-test/src/lib.rs index f0534f32c..5c0b63413 100644 --- a/postgres-derive-test/src/lib.rs +++ b/postgres-derive-test/src/lib.rs @@ -21,7 +21,7 @@ where let result = conn.query_one(&stmt, &[]).unwrap().get(0); assert_eq!(val, &result); - let stmt = conn.prepare(&format!("SELECT $1::{}", sql_type)).unwrap(); + let stmt = conn.prepare(&format!("SELECT $1::{sql_type}")).unwrap(); let result = conn.query_one(&stmt, &[val]).unwrap().get(0); assert_eq!(val, &result); } @@ -45,7 +45,7 @@ pub fn test_type_asymmetric( let result: F = conn.query_one(&stmt, &[]).unwrap().get(0); assert!(cmp(val, &result)); - let stmt = conn.prepare(&format!("SELECT $1::{}", sql_type)).unwrap(); + let stmt = conn.prepare(&format!("SELECT $1::{sql_type}")).unwrap(); let result: F = conn.query_one(&stmt, &[val]).unwrap().get(0); assert!(cmp(val, &result)); } diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md index b0075fa8e..7ae780ac5 100644 --- a/postgres-derive/CHANGELOG.md +++ b/postgres-derive/CHANGELOG.md @@ -1,5 +1,26 @@ # Change Log +## v0.4.7 - 2025-09-25 + +### Added + +* Added support for nested domains containing composite types to `FromSql` + +### Fixed + +* Added `dyn` keyword to boxed trait objects. + +### Changed + +* Updated repository links to use `rust-postgres` organization. +* Upgraded to Rust 2021 edition. + +## v0.4.6 - 2024-09-15 + +### Changed + +* Upgraded `heck`. 
+ ## v0.4.5 - 2023-08-19 ### Added diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index cbae6c77b..30430c756 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,11 +1,12 @@ [package] name = "postgres-derive" -version = "0.4.5" +version = "0.4.7" authors = ["Steven Fackler "] license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" description = "An internal crate used by postgres-types" -repository = "https://github.com/sfackler/rust-postgres" +repository = "https://github.com/rust-postgres/rust-postgres" +rust-version = "1.81" [lib] proc-macro = true diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index d3ac47f4f..7017f4bd9 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -179,7 +179,10 @@ fn domain_accepts_body(name: &str, field: &syn::Field) -> TokenStream { fn domain_body(ident: &Ident, field: &syn::Field) -> TokenStream { let ty = &field.ty; quote! { - <#ty as postgres_types::FromSql>::from_sql(_type, buf).map(#ident) + <#ty as postgres_types::FromSql>::from_sql(match *_type.kind() { + postgres_types::Kind::Domain(ref _type) => _type, + _ => _type + }, buf).map(#ident) } } diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs index d50550bee..712d13aac 100644 --- a/postgres-derive/src/overrides.rs +++ b/postgres-derive/src/overrides.rs @@ -65,7 +65,7 @@ impl Overrides { "invalid rename_all rule, expected one of: {}", RENAME_RULES .iter() - .map(|rule| format!("\"{}\"", rule)) + .map(|rule| format!("\"{rule}\"")) .collect::>() .join(", ") ), diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index 81d4834bf..17266d0f5 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -112,12 +112,12 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { let generics = append_generic_bound(input.generics.to_owned(), &new_tosql_bound()); let (impl_generics, ty_generics, 
where_clause) = generics.split_for_impl(); let out = quote! { - impl#impl_generics postgres_types::ToSql for #ident#ty_generics #where_clause { + impl #impl_generics postgres_types::ToSql for #ident #ty_generics #where_clause { fn to_sql(&self, _type: &postgres_types::Type, buf: &mut postgres_types::private::BytesMut) -> std::result::Result> { #to_sql_body @@ -196,7 +196,7 @@ fn composite_body(fields: &[Field]) -> TokenStream { postgres_types::IsNull::Yes => -1, postgres_types::IsNull::No => { let len = buf.len() - base - 4; - if len > i32::max_value() as usize { + if len > i32::MAX as usize { return std::result::Result::Err( std::convert::Into::into("value too large to transmit")); } diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index 9eb7ab800..3ef4dbe68 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -1,5 +1,18 @@ # Change Log +## v0.5.2 - 2025-09-25 + +### Changed + +* Updated repository links to use `rust-postgres` organization. +* Upgraded to Rust 2021 edition. + +## v0.5.1 - 2025-02-02 + +### Added + +* Added `set_postgresql_alpn`. 
+ ## v0.5.0 - 2020-12-25 ### Changed diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 02259b3dc..dd16d878a 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,27 +1,25 @@ [package] name = "postgres-native-tls" -version = "0.5.0" +version = "0.5.2" authors = ["Steven Fackler "] -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" description = "TLS support for tokio-postgres via native-tls" -repository = "https://github.com/sfackler/rust-postgres" +repository = "https://github.com/rust-postgres/rust-postgres" readme = "../README.md" - -[badges] -circle-ci = { repository = "sfackler/rust-postgres" } +rust-version = "1.81" [features] default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -native-tls = "0.2" +native-tls = { version = "0.2", features = ["alpn"] } tokio = "1.0" tokio-native-tls = "0.3" -tokio-postgres = { version = "0.7.11", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.7.14", path = "../tokio-postgres", default-features = false } [dev-dependencies] -futures-util = "0.3" +futures-util = { version = "0.3", default-features = false } tokio = { version = "1.0", features = ["macros", "net", "rt"] } postgres = { version = "0.19.8", path = "../postgres" } diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index a06f185b5..9ee7da653 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -53,6 +53,7 @@ //! 
``` #![warn(rust_2018_idioms, clippy::all, missing_docs)] +use native_tls::TlsConnectorBuilder; use std::future::Future; use std::io; use std::pin::Pin; @@ -180,3 +181,10 @@ where } } } + +/// Set ALPN for `TlsConnectorBuilder` +/// +/// This is required when using `sslnegotiation=direct` +pub fn set_postgresql_alpn(builder: &mut TlsConnectorBuilder) { + builder.request_alpns(&["postgresql"]); +} diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 25cc6fdbd..738c04bd7 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -5,7 +5,7 @@ use tokio_postgres::tls::TlsConnect; #[cfg(feature = "runtime")] use crate::MakeTlsConnector; -use crate::TlsConnector; +use crate::{set_postgresql_alpn, TlsConnector}; async fn smoke_test(s: &str, tls: T) where @@ -42,6 +42,21 @@ async fn require() { .await; } +#[tokio::test] +async fn direct() { + let mut builder = native_tls::TlsConnector::builder(); + builder.add_root_certificate( + Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), + ); + set_postgresql_alpn(&mut builder); + let connector = builder.build().unwrap(); + smoke_test( + "user=ssl_user dbname=postgres sslmode=require sslnegotiation=direct", + TlsConnector::new(connector, "localhost"), + ) + .await; +} + #[tokio::test] async fn prefer() { let connector = native_tls::TlsConnector::builder() diff --git a/postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md index 346214ae8..b0e3d5b35 100644 --- a/postgres-openssl/CHANGELOG.md +++ b/postgres-openssl/CHANGELOG.md @@ -1,5 +1,18 @@ # Change Log +## v0.5.2 - 2025-09-25 + +### Changed + +* Updated repository links to use `rust-postgres` organization. +* Upgraded to Rust 2021 edition. + +## v0.5.1 - 2025-02-02 + +### Added + +* Added `set_postgresql_alpn`. 
+ ## v0.5.0 - 2020-12-25 ### Changed diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 9013384a2..aa3e4f50a 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,15 +1,13 @@ [package] name = "postgres-openssl" -version = "0.5.0" +version = "0.5.2" authors = ["Steven Fackler "] -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" description = "TLS support for tokio-postgres via openssl" -repository = "https://github.com/sfackler/rust-postgres" +repository = "https://github.com/rust-postgres/rust-postgres" readme = "../README.md" - -[badges] -circle-ci = { repository = "sfackler/rust-postgres" } +rust-version = "1.81" [features] default = ["runtime"] @@ -19,9 +17,9 @@ runtime = ["tokio-postgres/runtime"] openssl = "0.10" tokio = "1.0" tokio-openssl = "0.6" -tokio-postgres = { version = "0.7.11", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.7.14", path = "../tokio-postgres", default-features = false } [dev-dependencies] -futures-util = "0.3" +futures-util = { version = "0.3", default-features = false } tokio = { version = "1.0", features = ["macros", "net", "rt"] } postgres = { version = "0.19.8", path = "../postgres" } diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 837663fe7..232cccd05 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -53,7 +53,7 @@ use openssl::hash::MessageDigest; use openssl::nid::Nid; #[cfg(feature = "runtime")] use openssl::ssl::SslConnector; -use openssl::ssl::{self, ConnectConfiguration, SslRef}; +use openssl::ssl::{self, ConnectConfiguration, SslConnectorBuilder, SslRef}; use openssl::x509::X509VerifyResult; use std::error::Error; use std::fmt::{self, Debug}; @@ -250,3 +250,10 @@ fn tls_server_end_point(ssl: &SslRef) -> Option> { }; cert.digest(md).ok().map(|b| b.to_vec()) } + +/// Set ALPN for `SslConnectorBuilder` +/// +/// This is required when using `sslnegotiation=direct` 
+pub fn set_postgresql_alpn(builder: &mut SslConnectorBuilder) -> Result<(), ErrorStack> { + builder.set_alpn_protos(b"\x0apostgresql") +} diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index b361ee446..66bb22641 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -37,6 +37,19 @@ async fn require() { .await; } +#[tokio::test] +async fn direct() { + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + builder.set_ca_file("../test/server.crt").unwrap(); + set_postgresql_alpn(&mut builder).unwrap(); + let ctx = builder.build(); + smoke_test( + "user=ssl_user dbname=postgres sslmode=require sslnegotiation=direct", + TlsConnector::new(ctx.configure().unwrap(), "localhost"), + ) + .await; +} + #[tokio::test] async fn prefer() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 54dce91b0..0d5370abf 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,18 @@ # Change Log +## v0.6.9 - 2025-09-25 + +### Changed + +* Updated repository links to use `rust-postgres` organization. +* Upgraded to Rust 2021 edition. + +## v0.6.8 - 2025-02-02 + +### Changed + +* Upgraded `getrandom`. + ## v0.6.7 - 2024-07-21 ### Deprecated @@ -128,4 +141,4 @@ Look at the [release tags] for information about older releases. 
-[release tags]: https://github.com/sfackler/rust-postgres/releases +[release tags]: https://github.com/rust-postgres/rust-postgres/releases diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 49cf2d59c..6d07df120 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,16 +1,17 @@ [package] name = "postgres-protocol" -version = "0.6.7" +version = "0.6.9" authors = ["Steven Fackler "] -edition = "2018" +edition = "2021" description = "Low level Postgres protocol APIs" license = "MIT OR Apache-2.0" -repository = "https://github.com/sfackler/rust-postgres" +repository = "https://github.com/rust-postgres/rust-postgres" readme = "../README.md" +rust-version = "1.81" [features] default = [] -js = ["getrandom/js"] +js = ["getrandom/wasm_js"] [dependencies] base64 = "0.22" @@ -20,7 +21,7 @@ fallible-iterator = "0.2" hmac = "0.12" md-5 = "0.10" memchr = "2.0" -rand = "0.8" +rand = "0.9" sha2 = "0.10" stringprep = "0.1" -getrandom = { version = "0.2", optional = true } +getrandom = { version = "0.3", optional = true } diff --git a/postgres-protocol/src/authentication/mod.rs b/postgres-protocol/src/authentication/mod.rs index 71afa4b9b..efe7ce778 100644 --- a/postgres-protocol/src/authentication/mod.rs +++ b/postgres-protocol/src/authentication/mod.rs @@ -14,7 +14,7 @@ pub fn md5_hash(username: &[u8], password: &[u8], salt: [u8; 4]) -> String { md5.update(password); md5.update(username); let output = md5.finalize_reset(); - md5.update(format!("{:x}", output)); + md5.update(format!("{output:x}")); md5.update(salt); format!("md5{:x}", md5.finalize()) } diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index 4a77507e9..38ea0eede 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -136,10 +136,10 @@ impl ScramSha256 { /// Constructs a new instance which will use the provided password for authentication. 
pub fn new(password: &[u8], channel_binding: ChannelBinding) -> ScramSha256 { // rand 0.5's ThreadRng is cryptographically secure - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let nonce = (0..NONCE_LENGTH) .map(|_| { - let mut v = rng.gen_range(0x21u8..0x7e); + let mut v = rng.random_range(0x21u8..0x7e); if v == 0x2c { v = 0x7e } @@ -180,7 +180,7 @@ impl ScramSha256 { password, channel_binding, } => (nonce, password, channel_binding), - _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")), + _ => return Err(io::Error::other("invalid SCRAM state")), }; let message = @@ -205,7 +205,7 @@ impl ScramSha256 { let client_key = hmac.finalize().into_bytes(); let mut hash = Sha256::default(); - hash.update(client_key.as_slice()); + hash.update(client_key); let stored_key = hash.finalize_fixed(); let mut cbind_input = vec![]; @@ -252,7 +252,7 @@ impl ScramSha256 { salted_password, auth_message, } => (salted_password, auth_message), - _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")), + _ => return Err(io::Error::other("invalid SCRAM state")), }; let message = @@ -262,10 +262,7 @@ impl ScramSha256 { let verifier = match parsed { ServerFinalMessage::Error(e) => { - return Err(io::Error::new( - io::ErrorKind::Other, - format!("SCRAM error: {}", e), - )); + return Err(io::Error::other(format!("SCRAM error: {e}"))); } ServerFinalMessage::Verifier(verifier) => verifier, }; @@ -305,10 +302,8 @@ impl<'a> Parser<'a> { match self.it.next() { Some((_, c)) if c == target => Ok(()), Some((i, c)) => { - let m = format!( - "unexpected character at byte {}: expected `{}` but got `{}", - i, target, c - ); + let m = + format!("unexpected character at byte {i}: expected `{target}` but got `{c}"); Err(io::Error::new(io::ErrorKind::InvalidInput, m)) } None => Err(io::Error::new( @@ -374,7 +369,7 @@ impl<'a> Parser<'a> { match self.it.peek() { Some(&(i, _)) => Err(io::Error::new( io::ErrorKind::InvalidInput, - 
format!("unexpected trailing data at byte {}", i), + format!("unexpected trailing data at byte {i}"), )), None => Ok(()), } diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index fdc83fedb..18e24e0e9 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -247,7 +247,7 @@ impl Message { tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unknown authentication tag `{}`", tag), + format!("unknown authentication tag `{tag}`"), )); } }, @@ -274,7 +274,7 @@ impl Message { tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unknown message tag `{}`", tag), + format!("unknown message tag `{tag}`"), )); } }; @@ -487,7 +487,7 @@ pub struct ColumnFormats<'a> { remaining: u16, } -impl<'a> FallibleIterator for ColumnFormats<'a> { +impl FallibleIterator for ColumnFormats<'_> { type Item = u16; type Error = io::Error; @@ -591,7 +591,7 @@ pub struct DataRowRanges<'a> { remaining: u16, } -impl<'a> FallibleIterator for DataRowRanges<'a> { +impl FallibleIterator for DataRowRanges<'_> { type Item = Option>; type Error = io::Error; @@ -679,7 +679,7 @@ pub struct ErrorField<'a> { value: &'a [u8], } -impl<'a> ErrorField<'a> { +impl ErrorField<'_> { #[inline] pub fn type_(&self) -> u8 { self.type_ @@ -751,7 +751,7 @@ pub struct Parameters<'a> { remaining: u16, } -impl<'a> FallibleIterator for Parameters<'a> { +impl FallibleIterator for Parameters<'_> { type Item = Oid; type Error = io::Error; diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index 600f7da48..22c420664 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -3,7 +3,6 @@ use byteorder::{BigEndian, ByteOrder}; use bytes::{Buf, BufMut, BytesMut}; -use std::convert::TryFrom; use std::error::Error; use std::io; use std::marker; diff --git a/postgres-protocol/src/password/mod.rs 
b/postgres-protocol/src/password/mod.rs index f03bb811d..5f0bbabad 100644 --- a/postgres-protocol/src/password/mod.rs +++ b/postgres-protocol/src/password/mod.rs @@ -28,7 +28,7 @@ const SCRAM_DEFAULT_SALT_LEN: usize = 16; /// special characters that would require escaping in an SQL command. pub fn scram_sha_256(password: &[u8]) -> String { let mut salt: [u8; SCRAM_DEFAULT_SALT_LEN] = [0; SCRAM_DEFAULT_SALT_LEN]; - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); rng.fill_bytes(&mut salt); scram_sha_256_salt(password, salt) } @@ -70,7 +70,7 @@ pub(crate) fn scram_sha_256_salt(password: &[u8], salt: [u8; SCRAM_DEFAULT_SALT_ // stored key let mut hash = Sha256::default(); - hash.update(client_key.as_slice()); + hash.update(client_key); let stored_key = hash.finalize_fixed(); // server key @@ -102,5 +102,5 @@ pub fn md5(password: &[u8], username: &str) -> String { let mut hash = Md5::new(); hash.update(&salted_password); let digest = hash.finalize(); - format!("md5{:x}", digest) + format!("md5{digest:x}") } diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 05f515f76..9ebbeb43c 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -324,7 +324,7 @@ pub fn varbit_from_sql(mut buf: &[u8]) -> Result, StdBox Array<'a> { /// An iterator over the dimensions of an array. 
pub struct ArrayDimensions<'a>(&'a [u8]); -impl<'a> FallibleIterator for ArrayDimensions<'a> { +impl FallibleIterator for ArrayDimensions<'_> { type Item = ArrayDimension; type Error = StdBox; @@ -639,11 +639,10 @@ impl<'a> FallibleIterator for ArrayValues<'a> { let val = if len < 0 { None } else { - if self.buf.len() < len as usize { - return Err("invalid value length".into()); - } - - let (val, buf) = self.buf.split_at(len as usize); + let (val, buf) = self + .buf + .split_at_checked(len as usize) + .ok_or("invalid value length")?; self.buf = buf; Some(val) }; @@ -771,10 +770,7 @@ fn read_bound<'a>( None } else { let len = len as usize; - if buf.len() < len { - return Err("invalid message size".into()); - } - let (value, tail) = buf.split_at(len); + let (value, tail) = buf.split_at_checked(len).ok_or("invalid message size")?; *buf = tail; Some(value) }; @@ -950,7 +946,7 @@ pub struct PathPoints<'a> { buf: &'a [u8], } -impl<'a> FallibleIterator for PathPoints<'a> { +impl FallibleIterator for PathPoints<'_> { type Item = Point; type Error = StdBox; diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 1e5cd31d8..f0dbd7001 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,6 +1,41 @@ # Change Log -## Unreleased +## v0.2.11 - 2025-10-08 + +### Changed + +* Disabled default features of `jiff` v0.1 and v0.2. + +## v0.2.10 - 2025-09-25 + +### Added + +* Added support for `jiff` v0.2. +* Added support for `bit-vec` versions v0.7 and v0.8. +* Added `Serialize` and `Deserialize` impls for `Json` type. + +### Changed + +* Updated GitHub repository links to `rust-postgres` organization. +* Switched from `serde` to `serde_core` dependency. +* Upgraded to Rust 2021 edition. +* Improved `-Zminimal-versions` support. + +## v0.2.9 - 2025-02-02 + +### Added + +* Added support for `cidr` 0.3 via the `with-cidr-0_3` feature. 
+ +### Fixed + +* Fixed deserialization of out of bounds inputs to `time` 0.3 types to return an error rather than panic. + +## v0.2.8 - 2024-09-15 + +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. ## v0.2.7 - 2024-07-21 diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index e2d21b358..57cb7a517 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,26 +1,32 @@ [package] name = "postgres-types" -version = "0.2.7" +version = "0.2.11" authors = ["Steven Fackler "] -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" description = "Conversions between Rust and Postgres values" -repository = "https://github.com/sfackler/rust-postgres" +repository = "https://github.com/rust-postgres/rust-postgres" readme = "../README.md" keywords = ["database", "postgres", "postgresql", "sql"] categories = ["database"] +rust-version = "1.81" [features] derive = ["postgres-derive"] array-impls = ["array-init"] js = ["postgres-protocol/js"] with-bit-vec-0_6 = ["bit-vec-06"] +with-bit-vec-0_7 = ["bit-vec-07"] +with-bit-vec-0_8 = ["bit-vec-08"] with-cidr-0_2 = ["cidr-02"] +with-cidr-0_3 = ["cidr-03"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] with-eui48-1 = ["eui48-1"] with-geo-types-0_6 = ["geo-types-06"] with-geo-types-0_7 = ["geo-types-0_7"] +with-jiff-0_1 = ["jiff-01"] +with-jiff-0_2 = ["jiff-02"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-smol_str-01 = ["smol_str-01"] with-uuid-0_8 = ["uuid-08"] @@ -31,25 +37,30 @@ with-time-0_3 = ["time-03"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -postgres-protocol = { version = "0.6.7", path = "../postgres-protocol" } -postgres-derive = { version = "0.4.5", optional = true, path = "../postgres-derive" } +postgres-protocol = { version = "0.6.9", path = "../postgres-protocol" } +postgres-derive = { version = "0.4.7", optional = true, path = "../postgres-derive" } array-init = { version = "2", optional = true } bit-vec-06 = { 
version = "0.6", package = "bit-vec", optional = true } +bit-vec-07 = { version = "0.7", package = "bit-vec", optional = true } +bit-vec-08 = { version = "0.8", package = "bit-vec", optional = true } chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = [ "clock", ], optional = true } cidr-02 = { version = "0.2", package = "cidr", optional = true } +cidr-03 = { version = "0.3", package = "cidr", optional = true } # eui48-04 will stop compiling and support will be removed -# See https://github.com/sfackler/rust-postgres/issues/1073 -eui48-04 = { version = "0.4", package = "eui48", optional = true } +# See https://github.com/rust-postgres/rust-postgres/issues/1073 +eui48-04 = { version = "0.4.6", package = "eui48", optional = true } eui48-1 = { version = "1.0", package = "eui48", optional = true, default-features = false } geo-types-06 = { version = "0.6", package = "geo-types", optional = true } -geo-types-0_7 = { version = "0.7", package = "geo-types", optional = true } -serde-1 = { version = "1.0", package = "serde", optional = true } -serde_json-1 = { version = "1.0", package = "serde_json", optional = true } +geo-types-0_7 = { version = "0.7.8", package = "geo-types", optional = true } +jiff-01 = { version = "0.1", package = "jiff", default-features = false, features = ["std"], optional = true } +jiff-02 = { version = "0.2", package = "jiff", default-features = false, features = ["std"], optional = true } +serde-1 = { version = "1.0.221", package = "serde_core", optional = true } +serde_json-1 = { version = "1.0.144", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } uuid-1 = { version = "1.0", package = "uuid", optional = true } -time-02 = { version = "0.2", package = "time", optional = true } -time-03 = { version = "0.3", package = "time", default-features = false, optional = true } +time-02 = { version = "0.2.7", package = "time", optional = true } +time-03 = { 
version = "0.3.5", package = "time", default-features = false, optional = true } smol_str-01 = { version = "0.1.23", package = "smol_str", default-features = false, optional = true } diff --git a/postgres-types/src/bit_vec_07.rs b/postgres-types/src/bit_vec_07.rs new file mode 100644 index 000000000..44a3d1c4e --- /dev/null +++ b/postgres-types/src/bit_vec_07.rs @@ -0,0 +1,30 @@ +use bit_vec_07::BitVec; +use bytes::BytesMut; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for BitVec { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let varbit = types::varbit_from_sql(raw)?; + let mut bitvec = BitVec::from_bytes(varbit.bytes()); + while bitvec.len() > varbit.len() { + bitvec.pop(); + } + + Ok(bitvec) + } + + accepts!(BIT, VARBIT); +} + +impl ToSql for BitVec { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::varbit_to_sql(self.len(), self.to_bytes().into_iter(), out)?; + Ok(IsNull::No) + } + + accepts!(BIT, VARBIT); + to_sql_checked!(); +} diff --git a/postgres-types/src/bit_vec_08.rs b/postgres-types/src/bit_vec_08.rs new file mode 100644 index 000000000..4c03bc424 --- /dev/null +++ b/postgres-types/src/bit_vec_08.rs @@ -0,0 +1,30 @@ +use bit_vec_08::BitVec; +use bytes::BytesMut; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for BitVec { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let varbit = types::varbit_from_sql(raw)?; + let mut bitvec = BitVec::from_bytes(varbit.bytes()); + while bitvec.len() > varbit.len() { + bitvec.pop(); + } + + Ok(bitvec) + } + + accepts!(BIT, VARBIT); +} + +impl ToSql for BitVec { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::varbit_to_sql(self.len(), self.to_bytes().into_iter(), out)?; + Ok(IsNull::No) + } + + accepts!(BIT, VARBIT); + to_sql_checked!(); +} diff --git a/postgres-types/src/chrono_04.rs 
b/postgres-types/src/chrono_04.rs index 6b6406232..3925007f2 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -123,11 +123,9 @@ impl<'a> FromSql<'a> for NaiveDate { impl ToSql for NaiveDate { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { let jd = self.signed_duration_since(base().date()).num_days(); - if jd > i64::from(i32::max_value()) || jd < i64::from(i32::min_value()) { - return Err("value too large to transmit".into()); - } + let jd = i32::try_from(jd).map_err(|_| "value too large to transmit")?; - types::date_to_sql(jd as i32, w); + types::date_to_sql(jd, w); Ok(IsNull::No) } diff --git a/postgres-types/src/cidr_03.rs b/postgres-types/src/cidr_03.rs new file mode 100644 index 000000000..6a0178711 --- /dev/null +++ b/postgres-types/src/cidr_03.rs @@ -0,0 +1,44 @@ +use bytes::BytesMut; +use cidr_03::{IpCidr, IpInet}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for IpCidr { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(IpCidr::new(inet.addr(), inet.netmask())?) + } + + accepts!(CIDR); +} + +impl ToSql for IpCidr { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::inet_to_sql(self.first_address(), self.network_length(), w); + Ok(IsNull::No) + } + + accepts!(CIDR); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for IpInet { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(IpInet::new(inet.addr(), inet.netmask())?) 
+ } + + accepts!(INET); +} + +impl ToSql for IpInet { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::inet_to_sql(self.address(), self.network_length(), w); + Ok(IsNull::No) + } + + accepts!(INET); + to_sql_checked!(); +} diff --git a/postgres-types/src/eui48_04.rs b/postgres-types/src/eui48_04.rs index 45df89a84..cb913e91a 100644 --- a/postgres-types/src/eui48_04.rs +++ b/postgres-types/src/eui48_04.rs @@ -16,9 +16,7 @@ impl<'a> FromSql<'a> for MacAddress { impl ToSql for MacAddress { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let mut bytes = [0; 6]; - bytes.copy_from_slice(self.as_bytes()); - types::macaddr_to_sql(bytes, w); + types::macaddr_to_sql(self.to_array(), w); Ok(IsNull::No) } diff --git a/postgres-types/src/eui48_1.rs b/postgres-types/src/eui48_1.rs index 4c35e63ce..bd2bd1833 100644 --- a/postgres-types/src/eui48_1.rs +++ b/postgres-types/src/eui48_1.rs @@ -16,9 +16,7 @@ impl<'a> FromSql<'a> for MacAddress { impl ToSql for MacAddress { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let mut bytes = [0; 6]; - bytes.copy_from_slice(self.as_bytes()); - types::macaddr_to_sql(bytes, w); + types::macaddr_to_sql(self.to_array(), w); Ok(IsNull::No) } diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs new file mode 100644 index 000000000..d3215c0e6 --- /dev/null +++ b/postgres-types/src/jiff_01.rs @@ -0,0 +1,141 @@ +use bytes::BytesMut; +use jiff_01::{ + civil::{Date, DateTime, Time}, + Span, SpanRound, Timestamp, Unit, +}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +const fn base() -> DateTime { + DateTime::constant(2000, 1, 1, 0, 0, 0, 0) +} + +/// The number of seconds from the Unix epoch to 2000-01-01 00:00:00 UTC. 
+const PG_EPOCH: i64 = 946684800; + +fn base_ts() -> Timestamp { + Timestamp::new(PG_EPOCH, 0).unwrap() +} + +fn round_us<'a>() -> SpanRound<'a> { + SpanRound::new().largest(Unit::Microsecond) +} + +fn decode_err(_e: E) -> Box +where + E: Error, +{ + "value too large to decode".into() +} + +fn transmit_err(_e: E) -> Box +where + E: Error, +{ + "value too large to transmit".into() +} + +impl<'a> FromSql<'a> for DateTime { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIMESTAMP); +} + +impl ToSql for DateTime { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(base()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? + .get_microseconds(); + types::timestamp_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMP); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Timestamp { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base_ts().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIMESTAMPTZ); +} + +impl ToSql for Timestamp { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(base_ts()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? 
+ .get_microseconds(); + types::timestamp_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Date { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::date_from_sql(raw)?; + Span::new() + .try_days(v) + .and_then(|s| base().date().checked_add(s)) + .map_err(decode_err) + } + accepts!(DATE); +} + +impl ToSql for Date { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self.since(base().date()).map_err(transmit_err)?.get_days(); + types::date_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(DATE); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Time { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::time_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| Time::midnight().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIME); +} + +impl ToSql for Time { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(Time::midnight()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? + .get_microseconds(); + types::time_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIME); + to_sql_checked!(); +} diff --git a/postgres-types/src/jiff_02.rs b/postgres-types/src/jiff_02.rs new file mode 100644 index 000000000..a736dd3eb --- /dev/null +++ b/postgres-types/src/jiff_02.rs @@ -0,0 +1,141 @@ +use bytes::BytesMut; +use jiff_02::{ + civil::{Date, DateTime, Time}, + Span, SpanRound, Timestamp, Unit, +}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +const fn base() -> DateTime { + DateTime::constant(2000, 1, 1, 0, 0, 0, 0) +} + +/// The number of seconds from the Unix epoch to 2000-01-01 00:00:00 UTC. 
+const PG_EPOCH: i64 = 946684800; + +fn base_ts() -> Timestamp { + Timestamp::new(PG_EPOCH, 0).unwrap() +} + +fn round_us<'a>() -> SpanRound<'a> { + SpanRound::new().largest(Unit::Microsecond) +} + +fn decode_err(_e: E) -> Box +where + E: Error, +{ + "value too large to decode".into() +} + +fn transmit_err(_e: E) -> Box +where + E: Error, +{ + "value too large to transmit".into() +} + +impl<'a> FromSql<'a> for DateTime { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIMESTAMP); +} + +impl ToSql for DateTime { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(base()) + .and_then(|s| s.round(round_us().relative(base()))) + .map_err(transmit_err)? + .get_microseconds(); + types::timestamp_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMP); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Timestamp { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base_ts().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIMESTAMPTZ); +} + +impl ToSql for Timestamp { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(base_ts()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? 
+ .get_microseconds(); + types::timestamp_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Date { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::date_from_sql(raw)?; + Span::new() + .try_days(v) + .and_then(|s| base().date().checked_add(s)) + .map_err(decode_err) + } + accepts!(DATE); +} + +impl ToSql for Date { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self.since(base().date()).map_err(transmit_err)?.get_days(); + types::date_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(DATE); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Time { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::time_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| Time::midnight().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIME); +} + +impl ToSql for Time { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(Time::midnight()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? 
+ .get_microseconds(); + types::time_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIME); + to_sql_checked!(); +} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 492039766..9da20c522 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -264,10 +264,16 @@ where #[cfg(feature = "with-bit-vec-0_6")] mod bit_vec_06; +#[cfg(feature = "with-bit-vec-0_7")] +mod bit_vec_07; +#[cfg(feature = "with-bit-vec-0_8")] +mod bit_vec_08; #[cfg(feature = "with-chrono-0_4")] mod chrono_04; #[cfg(feature = "with-cidr-0_2")] mod cidr_02; +#[cfg(feature = "with-cidr-0_3")] +mod cidr_03; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; #[cfg(feature = "with-eui48-1")] @@ -276,6 +282,10 @@ mod eui48_1; mod geo_types_06; #[cfg(feature = "with-geo-types-0_7")] mod geo_types_07; +#[cfg(feature = "with-jiff-0_1")] +mod jiff_01; +#[cfg(feature = "with-jiff-0_2")] +mod jiff_02; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-smol_str-01")] @@ -313,7 +323,7 @@ impl fmt::Display for Type { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self.schema() { "public" | "pg_catalog" => {} - schema => write!(fmt, "{}.", schema)?, + schema => write!(fmt, "{schema}.")?, } fmt.write_str(self.name()) } @@ -487,10 +497,16 @@ impl WrongType { /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | +/// | `cidr::IpCidr` | CIDR | +/// | `cidr::IpInet` | INET | /// | `time::PrimitiveDateTime` | TIMESTAMP | /// | `time::OffsetDateTime` | TIMESTAMP WITH TIME ZONE | /// | `time::Date` | DATE | /// | `time::Time` | TIME | +/// | `jiff::civil::Date` | DATE | +/// | `jiff::civil::DateTime` | TIMESTAMP | +/// | `jiff::civil::Time` | TIME | +/// | `jiff::Timestamp` | TIMESTAMP WITH TIME ZONE | /// | `eui48::MacAddress` | MACADDR | /// | `geo_types::Point` | POINT | /// | `geo_types::Rect` | BOX | @@ -619,16 +635,14 @@ impl<'a, T: FromSql<'a>, const N: usize> 
FromSql<'a> for [T; N] { let v = values .next()? .ok_or_else(|| -> Box { - format!("too few elements in array (expected {}, got {})", N, i).into() + format!("too few elements in array (expected {N}, got {i})").into() })?; T::from_sql_nullable(member_type, v) })?; if values.next()?.is_some() { - return Err(format!( - "excess elements in array (expected {}, got more than that)", - N, - ) - .into()); + return Err( + format!("excess elements in array (expected {N}, got more than that)",).into(), + ); } Ok(out) @@ -642,6 +656,16 @@ impl<'a, T: FromSql<'a>, const N: usize> FromSql<'a> for [T; N] { } } +impl<'a, T: FromSql<'a>> FromSql<'a> for Box { + fn from_sql(ty: &Type, row: &'a [u8]) -> Result> { + T::from_sql(ty, row).map(Box::new) + } + + fn accepts(ty: &Type) -> bool { + T::accepts(ty) + } +} + impl<'a, T: FromSql<'a>> FromSql<'a> for Box<[T]> { fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { Vec::::from_sql(ty, raw).map(Vec::into_boxed_slice) @@ -834,6 +858,8 @@ pub enum IsNull { /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | +/// | `cidr::IpCidr` | CIDR | +/// | `cidr::IpInet` | INET | /// | `time::PrimitiveDateTime` | TIMESTAMP | /// | `time::OffsetDateTime` | TIMESTAMP WITH TIME ZONE | /// | `time::Date` | DATE | @@ -858,6 +884,9 @@ pub enum IsNull { /// `ToSql` is implemented for `[u8; N]`, `Vec`, `&[T]`, `Box<[T]>` and `[T; N]` /// where `T` implements `ToSql` and `N` is const usize, and corresponds to one-dimensional /// Postgres arrays with an index offset of 1. +/// To make conversion work correctly for `WHERE ... IN` clauses, for example +/// `WHERE col IN ($1)`, you may instead have to use the construct +/// `WHERE col = ANY ($1)` which expects an array. /// /// **Note:** the impl for arrays only exist when the Cargo feature `array-impls` /// is enabled. 
@@ -908,7 +937,7 @@ pub enum Format { Binary, } -impl<'a, T> ToSql for &'a T +impl ToSql for &T where T: ToSql, { @@ -949,7 +978,7 @@ impl ToSql for Option { fn encode_format(&self, ty: &Type) -> Format { match self { - Some(ref val) => val.encode_format(ty), + Some(val) => val.encode_format(ty), None => Format::Binary, } } @@ -957,7 +986,7 @@ impl ToSql for Option { to_sql_checked!(); } -impl<'a, T: ToSql> ToSql for &'a [T] { +impl ToSql for &[T] { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { let member_type = match *ty.kind() { Kind::Array(ref member) => member, @@ -998,7 +1027,7 @@ impl<'a, T: ToSql> ToSql for &'a [T] { to_sql_checked!(); } -impl<'a> ToSql for &'a [u8] { +impl ToSql for &[u8] { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { types::bytea_to_sql(self, w); Ok(IsNull::No) @@ -1046,6 +1075,18 @@ impl ToSql for Vec { to_sql_checked!(); } +impl ToSql for Box { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&T as ToSql>::to_sql(&&**self, ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&T as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + impl ToSql for Box<[T]> { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[T] as ToSql>::to_sql(&&**self, ty, w) @@ -1058,7 +1099,7 @@ impl ToSql for Box<[T]> { to_sql_checked!(); } -impl<'a> ToSql for Cow<'a, [u8]> { +impl ToSql for Cow<'_, [u8]> { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[u8] as ToSql>::to_sql(&self.as_ref(), ty, w) } @@ -1082,7 +1123,7 @@ impl ToSql for Vec { to_sql_checked!(); } -impl<'a> ToSql for &'a str { +impl ToSql for &str { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { match ty.name() { "ltree" => types::ltree_to_sql(self, w), @@ -1103,7 +1144,7 @@ impl<'a> ToSql for &'a str { to_sql_checked!(); } -impl<'a> ToSql for Cow<'a, str> { +impl ToSql for Cow<'_, str> { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&str as ToSql>::to_sql(&self.as_ref(), ty, w) } @@ -1250,17 +1291,17 @@ impl 
BorrowToSql for &dyn ToSql { } } -impl<'a> sealed::Sealed for Box {} +impl sealed::Sealed for Box {} -impl<'a> BorrowToSql for Box { +impl BorrowToSql for Box { #[inline] fn borrow_to_sql(&self) -> &dyn ToSql { self.as_ref() } } -impl<'a> sealed::Sealed for Box {} -impl<'a> BorrowToSql for Box { +impl sealed::Sealed for Box {} +impl BorrowToSql for Box { #[inline] fn borrow_to_sql(&self) -> &dyn ToSql { self.as_ref() diff --git a/postgres-types/src/pg_lsn.rs b/postgres-types/src/pg_lsn.rs index f339f9689..51004b329 100644 --- a/postgres-types/src/pg_lsn.rs +++ b/postgres-types/src/pg_lsn.rs @@ -52,7 +52,7 @@ impl fmt::Display for PgLsn { impl fmt::Debug for PgLsn { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_fmt(format_args!("{}", self)) + fmt::Display::fmt(self, f) } } diff --git a/postgres-types/src/private.rs b/postgres-types/src/private.rs index 774f9a301..d5205cb74 100644 --- a/postgres-types/src/private.rs +++ b/postgres-types/src/private.rs @@ -3,13 +3,13 @@ pub use bytes::BytesMut; use std::error::Error; pub fn read_be_i32(buf: &mut &[u8]) -> Result> { - if buf.len() < 4 { - return Err("invalid buffer size".into()); - } - let mut bytes = [0; 4]; - bytes.copy_from_slice(&buf[..4]); + let val = buf + .get(..4) + .ok_or("invalid buffer size")? 
+ .try_into() + .unwrap(); *buf = &buf[4..]; - Ok(i32::from_be_bytes(bytes)) + Ok(i32::from_be_bytes(val)) } pub fn read_value<'a, T>( @@ -23,10 +23,9 @@ where let value = if len < 0 { None } else { - if len as usize > buf.len() { - return Err("invalid buffer size".into()); - } - let (head, tail) = buf.split_at(len as usize); + let (head, tail) = buf + .split_at_checked(len as usize) + .ok_or("invalid buffer size")?; *buf = tail; Some(head) }; diff --git a/postgres-types/src/serde_json_1.rs b/postgres-types/src/serde_json_1.rs index 715c33f98..ef1e749bb 100644 --- a/postgres-types/src/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -1,6 +1,6 @@ use crate::{FromSql, IsNull, ToSql, Type}; use bytes::{BufMut, BytesMut}; -use serde_1::{Deserialize, Serialize}; +use serde_1::{Deserialize, Deserializer, Serialize, Serializer}; use serde_json_1::Value; use std::error::Error; use std::fmt::Debug; @@ -10,6 +10,18 @@ use std::io::Read; #[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct Json(pub T); +impl Serialize for Json { + fn serialize(&self, serializer: S) -> Result { + self.0.serialize(serializer) + } +} + +impl<'de, T: Deserialize<'de>> Deserialize<'de> for Json { + fn deserialize>(deserializer: D) -> Result { + T::deserialize(deserializer).map(Self) + } +} + impl<'a, T> FromSql<'a> for Json where T: Deserialize<'a>, diff --git a/postgres-types/src/time_02.rs b/postgres-types/src/time_02.rs index 19a8909e7..6ac4815df 100644 --- a/postgres-types/src/time_02.rs +++ b/postgres-types/src/time_02.rs @@ -1,6 +1,5 @@ use bytes::BytesMut; use postgres_protocol::types; -use std::convert::TryFrom; use std::error::Error; use time_02::{date, time, Date, Duration, OffsetDateTime, PrimitiveDateTime, Time, UtcOffset}; @@ -72,11 +71,9 @@ impl<'a> FromSql<'a> for Date { impl ToSql for Date { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { let jd = (*self - base().date()).whole_days(); - if jd > i64::from(i32::max_value()) || jd < 
i64::from(i32::min_value()) { - return Err("value too large to transmit".into()); - } + let jd = i32::try_from(jd).map_err(|_| "value too large to transmit")?; - types::date_to_sql(jd as i32, w); + types::date_to_sql(jd, w); Ok(IsNull::No) } diff --git a/postgres-types/src/time_03.rs b/postgres-types/src/time_03.rs index f136fab7c..cb5173e1e 100644 --- a/postgres-types/src/time_03.rs +++ b/postgres-types/src/time_03.rs @@ -1,6 +1,5 @@ use bytes::BytesMut; use postgres_protocol::types; -use std::convert::TryFrom; use std::error::Error; use time_03::{Date, Duration, OffsetDateTime, PrimitiveDateTime, Time, UtcOffset}; @@ -13,7 +12,9 @@ fn base() -> PrimitiveDateTime { impl<'a> FromSql<'a> for PrimitiveDateTime { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let t = types::timestamp_from_sql(raw)?; - Ok(base() + Duration::microseconds(t)) + Ok(base() + .checked_add(Duration::microseconds(t)) + .ok_or("value too large to decode")?) } accepts!(TIMESTAMP); @@ -62,7 +63,10 @@ impl ToSql for OffsetDateTime { impl<'a> FromSql<'a> for Date { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let jd = types::date_from_sql(raw)?; - Ok(base().date() + Duration::days(i64::from(jd))) + Ok(base() + .date() + .checked_add(Duration::days(i64::from(jd))) + .ok_or("value too large to decode")?) 
} accepts!(DATE); @@ -71,11 +75,9 @@ impl<'a> FromSql<'a> for Date { impl ToSql for Date { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { let jd = (*self - base().date()).whole_days(); - if jd > i64::from(i32::max_value()) || jd < i64::from(i32::min_value()) { - return Err("value too large to transmit".into()); - } + let jd = i32::try_from(jd).map_err(|_| "value too large to transmit")?; - types::date_to_sql(jd as i32, w); + types::date_to_sql(jd, w); Ok(IsNull::No) } diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 258cdb518..b70bc1182 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,41 @@ # Change Log +## v0.19.12 - 2025-10-08 + +### Added + +* Added `Client::check_connection` API. + +### Changed + +* Improved the effectiveness of `Client::is_closed`. +* Disabled default features of `futures-util`. + +## v0.19.11 - 2025-09-25 + +### Added + +* Added support for `jiff` v0.2. +* Added support for `bit-vec` versions v0.7 and v0.8. + +### Changed + +* Updated repository links to use `rust-postgres` organization. +* Upgraded to Rust 2021 edition. + +## v0.19.10 - 2025-02-02 + +### Added + +* Added support for direct TLS negotiation. +* Added support for `cidr` 0.3 via the `with-cidr-0_3` feature. + +## v0.19.9 - 2024-09-15 + +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. + ## v0.19.8 - 2024-07-21 ### Added @@ -268,4 +304,4 @@ Look at the [release tags] for information about older releases. 
-[release tags]: https://github.com/sfackler/rust-postgres/releases +[release tags]: https://github.com/rust-postgres/rust-postgres/releases diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index ff95c4f14..d6d3caa34 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,14 +1,15 @@ [package] name = "postgres" -version = "0.19.8" +version = "0.19.12" authors = ["Steven Fackler "] -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" description = "A native, synchronous PostgreSQL client" -repository = "https://github.com/sfackler/rust-postgres" +repository = "https://github.com/rust-postgres/rust-postgres" readme = "../README.md" keywords = ["database", "postgres", "postgresql", "sql"] categories = ["database"] +rust-version = "1.81" [[bench]] name = "bench" @@ -17,17 +18,20 @@ harness = false [package.metadata.docs.rs] all-features = true -[badges] -circle-ci = { repository = "sfackler/rust-postgres" } - [features] array-impls = ["tokio-postgres/array-impls"] with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] +with-bit-vec-0_7 = ["tokio-postgres/with-bit-vec-0_7"] +with-bit-vec-0_8 = ["tokio-postgres/with-bit-vec-0_8"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] +with-cidr-0_2 = ["tokio-postgres/with-cidr-0_2"] +with-cidr-0_3 = ["tokio-postgres/with-cidr-0_3"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] with-eui48-1 = ["tokio-postgres/with-eui48-1"] with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] +with-jiff-0_1 = ["tokio-postgres/with-jiff-0_1"] +with-jiff-0_2 = ["tokio-postgres/with-jiff-0_2"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-smol_str-01 = ["tokio-postgres/with-smol_str-01"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] @@ -38,10 +42,10 @@ with-time-0_3 = ["tokio-postgres/with-time-0_3"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -futures-util = { version = "0.3.14", features = ["sink"] } +futures-util = 
{ version = "0.3.14", default-features = false, features = ["sink"] } log = "0.4" -tokio-postgres = { version = "0.7.11", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.15", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } [dev-dependencies] -criterion = "0.5" +criterion = "0.7" diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 42ce6dec9..12133a0ae 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -487,6 +487,11 @@ impl Client { self.connection.block_on(self.client.batch_execute(query)) } + /// Check that the connection is alive and wait for the confirmation. + pub fn check_connection(&mut self) -> Result<(), Error> { + self.connection.block_on(self.client.check_connection()) + } + /// Begins a new database transaction. /// /// The transaction will roll back by default - use the `commit` method to commit it. diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 91ad3c904..c1d3e5147 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -1,5 +1,7 @@ //! Connection configuration. +#![allow(clippy::doc_overindented_list_items)] + use crate::connection::Connection; use crate::Client; use log::info; @@ -12,7 +14,7 @@ use std::time::Duration; use tokio::runtime; #[doc(inline)] pub use tokio_postgres::config::{ - ChannelBinding, Host, LoadBalanceHosts, SslMode, TargetSessionAttrs, + ChannelBinding, Host, LoadBalanceHosts, SslMode, SslNegotiation, TargetSessionAttrs, }; use tokio_postgres::error::DbError; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; @@ -44,6 +46,9 @@ use tokio_postgres::{Error, Socket}; /// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting /// with the `connect` method. +/// * `sslnegotiation` - TLS negotiation method. 
If set to `direct`, the client will perform direct TLS handshake, this only works for PostgreSQL 17 and newer. +/// Note that you will need to setup ALPN of TLS client configuration to `postgresql` when using direct TLS. +/// If set to `postgres`, the default value, it follows original postgres wire protocol to perform the negotiation. /// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, /// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. /// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, @@ -93,7 +98,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// host=/var/lib/postgresql,localhost port=1234 user=postgres password='password with spaces' +/// host=/var/run/postgresql,localhost port=1234 user=postgres password='password with spaces' /// ``` /// /// ```not_rust @@ -118,7 +123,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10 +/// postgresql://user:password@%2Fvar%2Frun%2Fpostgresql/mydb?connect_timeout=10 /// ``` /// /// ```not_rust @@ -126,7 +131,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// postgresql:///mydb?user=user&host=/var/lib/postgresql +/// postgresql:///mydb?user=user&host=/var/run/postgresql /// ``` #[derive(Clone)] pub struct Config { @@ -273,6 +278,17 @@ impl Config { self.config.get_ssl_root_cert() } + /// Sets the SSL negotiation method + pub fn ssl_negotiation(&mut self, ssl_negotiation: SslNegotiation) -> &mut Config { + self.config.ssl_negotiation(ssl_negotiation); + self + } + + /// Gets the SSL negotiation method + pub fn get_ssl_negotiation(&self) -> SslNegotiation { + self.config.get_ssl_negotiation() + } + /// Adds a host to the configuration. 
/// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix diff --git a/postgres/src/connection.rs b/postgres/src/connection.rs index b91c16555..2cd8155de 100644 --- a/postgres/src/connection.rs +++ b/postgres/src/connection.rs @@ -1,9 +1,9 @@ use crate::{Error, Notification}; -use futures_util::{future, pin_mut, Stream}; +use futures_util::Stream; use std::collections::VecDeque; -use std::future::Future; +use std::future::{self, Future}; use std::ops::{Deref, DerefMut}; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::Arc; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; @@ -52,7 +52,7 @@ impl Connection { where F: Future>, { - pin_mut!(future); + let mut future = pin!(future); self.poll_block_on(|cx, _, _| future.as_mut().poll(cx)) } diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs index 7de663637..83c642c73 100644 --- a/postgres/src/copy_in_writer.rs +++ b/postgres/src/copy_in_writer.rs @@ -53,7 +53,6 @@ impl Write for CopyInWriter<'_> { } fn flush(&mut self) -> io::Result<()> { - self.flush_inner() - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + self.flush_inner().map_err(io::Error::other) } } diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index 828b71873..b683ddeec 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -41,7 +41,7 @@ impl BufRead for CopyOutReader<'_> { .block_on(async { stream.next().await.transpose() }) { Ok(Some(cur)) => self.cur = cur, - Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), + Err(e) => return Err(io::Error::other(e)), Ok(None) => break, }; } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index ddf1609ad..255a7a171 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -54,6 +54,8 @@ //! | Feature | Description | Extra dependencies | Default | //! 
| ------- | ----------- | ------------------ | ------- | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | +//! | `with-bit-vec-0_7` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.7 | no | +//! | `with-bit-vec-0_8` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.8 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. This is deprecated and will be removed. | [eui48](https://crates.io/crates/eui48) 0.4 | no | //! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | diff --git a/postgres/src/notifications.rs b/postgres/src/notifications.rs index c31d4f631..33fd3ae4f 100644 --- a/postgres/src/notifications.rs +++ b/postgres/src/notifications.rs @@ -3,9 +3,9 @@ use crate::connection::ConnectionRef; use crate::{Error, Notification}; use fallible_iterator::FallibleIterator; -use futures_util::{ready, FutureExt}; +use futures_util::FutureExt; use std::pin::Pin; -use std::task::Poll; +use std::task::{ready, Poll}; use std::time::Duration; use tokio::time::{self, Instant, Sleep}; @@ -77,7 +77,7 @@ pub struct Iter<'a> { connection: ConnectionRef<'a>, } -impl<'a> FallibleIterator for Iter<'a> { +impl FallibleIterator for Iter<'_> { type Item = Notification; type Error = Error; @@ -100,7 +100,7 @@ pub struct BlockingIter<'a> { connection: ConnectionRef<'a>, } -impl<'a> FallibleIterator for BlockingIter<'a> { +impl FallibleIterator for BlockingIter<'_> { type Item = Notification; type Error = Error; @@ -129,7 +129,7 @@ pub struct TimeoutIter<'a> { timeout: Duration, } -impl<'a> FallibleIterator for TimeoutIter<'a> { +impl FallibleIterator for TimeoutIter<'_> { type Item = Notification; type 
Error = Error; diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 0fd404574..4e5b49761 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -508,3 +508,24 @@ fn check_send() { is_send::(); is_send::>(); } + +#[test] +fn is_closed() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + assert!(!client.is_closed()); + client.check_connection().unwrap(); + + let row = client.query_one("select pg_backend_pid()", &[]).unwrap(); + let pid: i32 = row.get(0); + + { + let mut client2 = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + client2 + .query("SELECT pg_terminate_backend($1)", &[&pid]) + .unwrap(); + } + + assert!(!client.is_closed()); + client.check_connection().unwrap_err(); + assert!(client.is_closed()); +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 5c8c15973..8126b1dbe 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -12,7 +12,7 @@ pub struct Transaction<'a> { transaction: Option>, } -impl<'a> Drop for Transaction<'a> { +impl Drop for Transaction<'_> { fn drop(&mut self) { if let Some(transaction) = self.transaction.take() { let _ = self.connection.block_on(transaction.rollback()); diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index e0be26296..03755933e 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,6 +1,63 @@ # Change Log -## Unreleased +## v0.7.15 - 2025-10-08 + +### Added + +* Added `Client::check_connection` API. +* Added `Client::simple_query_raw` API. + +### Changed + +* Improved the effectiveness of `Client::is_closed`. +* Stop including error chain in `Display` impl of `Error` + +## v0.7.14 - 2025-09-25 + +### Added + +* Added support for `jiff` v0.2. +* Added support for `bit-vec` versions v0.7 and v0.8. 
+* Re-exported `fallible-iterator` +* Expose sync rollback as a private API method + +### Fixes + +* Changed `set_tcp_user_timeout` to only be called when enabled + +### Changed + +* Updated repository links to use `rust-postgres` organization. +* Upgraded to Rust 2021 edition. +* Upgraded `phf` to v0.13 +* Upgraded `socket2` to v0.6 +* Disabled default features of `futures-util` + +## v0.7.13 - 2025-02-02 + +### Added + +* Added support for direct TLS negotiation. +* Added support for `cidr` 0.3 via the `with-cidr-0_3` feature. + +### Fixes + +* Added `load_balance_hosts` to `Config`'s `Debug` implementation. + +### Changes + +* Upgraded `rand`. + +## v0.7.12 - 2024-09-15 + +### Fixed + +* Fixed `query_typed` queries that return no rows. + +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. +* Added support for TCP keepalive on AIX. ## v0.7.11 - 2024-07-21 @@ -285,4 +342,4 @@ Look at the [release tags] for information about older releases. -[release tags]: https://github.com/sfackler/rust-postgres/releases +[release tags]: https://github.com/rust-postgres/rust-postgres/releases diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f0e7fdb3e..ccd38198d 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,14 +1,15 @@ [package] name = "tokio-postgres" -version = "0.7.11" +version = "0.7.15" authors = ["Steven Fackler "] -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" description = "A native, asynchronous PostgreSQL client" -repository = "https://github.com/sfackler/rust-postgres" +repository = "https://github.com/rust-postgres/rust-postgres" readme = "../README.md" keywords = ["database", "postgres", "postgresql", "sql", "async"] categories = ["database"] +rust-version = "1.81" [lib] test = false @@ -20,20 +21,23 @@ harness = false [package.metadata.docs.rs] all-features = true -[badges] -circle-ci = { repository = "sfackler/rust-postgres" } - [features] default = ["runtime"] 
runtime = ["tokio/net", "tokio/time"] array-impls = ["postgres-types/array-impls"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] +with-bit-vec-0_7 = ["postgres-types/with-bit-vec-0_7"] +with-bit-vec-0_8 = ["postgres-types/with-bit-vec-0_8"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] +with-cidr-0_2 = ["postgres-types/with-cidr-0_2"] +with-cidr-0_3 = ["postgres-types/with-cidr-0_3"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] with-eui48-1 = ["postgres-types/with-eui48-1"] with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] +with-jiff-0_1 = ["postgres-types/with-jiff-0_1"] +with-jiff-0_2 = ["postgres-types/with-jiff-0_2"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-smol_str-01 = ["postgres-types/with-smol_str-01"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] @@ -48,26 +52,27 @@ bytes = "1.0" byteorder = "1.0" fallible-iterator = "0.2" futures-channel = { version = "0.3", features = ["sink"] } -futures-util = { version = "0.3", features = ["sink"] } +futures-util = { version = "0.3", default-features = false, features = ["sink"] } log = "0.4" parking_lot = "0.12" percent-encoding = "2.0" -pin-project-lite = "0.2" -phf = "0.11" -postgres-protocol = { version = "0.6.7", path = "../postgres-protocol" } -postgres-types = { version = "0.2.7", path = "../postgres-types" } -serde = { version = "1.0", optional = true } +pin-project-lite = "0.2.11" +phf = "0.13" +postgres-protocol = { version = "0.6.9", path = "../postgres-protocol" } +postgres-types = { version = "0.2.10", path = "../postgres-types" } +serde = { version = "1.0", features = [ "derive" ], optional = true } tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } -rand = "0.8.5" +rand = "0.9.0" whoami = "1.4.1" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -socket2 = { version = "0.5", features = ["all"] } +socket2 = { version = "0.6", 
features = ["all"] } [dev-dependencies] +futures-util = { version = "0.3", default-features = false, features = ["async-await-macro"] } futures-executor = "0.3" -criterion = "0.5" +criterion = "0.7" env_logger = "0.11" tokio = { version = "1.0", features = [ "macros", @@ -78,10 +83,14 @@ tokio = { version = "1.0", features = [ ] } bit-vec-06 = { version = "0.6", package = "bit-vec" } +bit-vec-07 = { version = "0.7", package = "bit-vec" } +bit-vec-08 = { version = "0.8", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } eui48-1 = { version = "1.0", package = "eui48", default-features = false } geo-types-06 = { version = "0.6", package = "geo-types" } geo-types-07 = { version = "0.7", package = "geo-types" } +jiff-01 = { version = "0.1", package = "jiff" } +jiff-02 = { version = "0.2", package = "jiff" } serde_json-1 = { version = "1.0", package = "serde_json" } smol_str-01 = { version = "0.1", package = "smol_str" } uuid-08 = { version = "0.8", package = "uuid" } diff --git a/tokio-postgres/src/binary_copy.rs b/tokio-postgres/src/binary_copy.rs index dab141663..8c244371c 100644 --- a/tokio-postgres/src/binary_copy.rs +++ b/tokio-postgres/src/binary_copy.rs @@ -4,16 +4,15 @@ use crate::types::{FromSql, IsNull, ToSql, Type, WrongType}; use crate::{slice_iter, CopyInSink, CopyOutStream, Error}; use byteorder::{BigEndian, ByteOrder}; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use futures_util::{ready, SinkExt, Stream}; +use futures_util::{SinkExt, Stream}; use pin_project_lite::pin_project; use postgres_types::BorrowToSql; -use std::convert::TryFrom; use std::io; use std::io::Cursor; use std::ops::Range; use std::pin::Pin; use std::sync::Arc; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; const MAGIC: &[u8] = b"PGCOPY\n\xff\r\n\0"; const HEADER_LEN: usize = MAGIC.len() + 4 + 4; diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index 078d4b8b6..2dfd47c06 100644 
--- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -1,5 +1,5 @@ use crate::client::SocketConfig; -use crate::config::SslMode; +use crate::config::{SslMode, SslNegotiation}; use crate::tls::MakeTlsConnect; use crate::{cancel_query_raw, connect_socket, Error, Socket}; use std::io; @@ -7,6 +7,7 @@ use std::io; pub(crate) async fn cancel_query( config: Option, ssl_mode: SslMode, + ssl_negotiation: SslNegotiation, mut tls: T, process_id: i32, secret_key: i32, @@ -38,6 +39,14 @@ where ) .await?; - cancel_query_raw::cancel_query_raw(socket, ssl_mode, tls, has_hostname, process_id, secret_key) - .await + cancel_query_raw::cancel_query_raw( + socket, + ssl_mode, + ssl_negotiation, + tls, + has_hostname, + process_id, + secret_key, + ) + .await } diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index 41aafe7d9..886606497 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -1,4 +1,4 @@ -use crate::config::SslMode; +use crate::config::{SslMode, SslNegotiation}; use crate::tls::TlsConnect; use crate::{connect_tls, Error}; use bytes::BytesMut; @@ -8,6 +8,7 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; pub async fn cancel_query_raw( stream: S, mode: SslMode, + negotiation: SslNegotiation, tls: T, has_hostname: bool, process_id: i32, @@ -17,7 +18,7 @@ where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - let mut stream = connect_tls::connect_tls(stream, mode, tls, has_hostname).await?; + let mut stream = connect_tls::connect_tls(stream, mode, negotiation, tls, has_hostname).await?; let mut buf = BytesMut::new(); frontend::cancel_request(process_id, secret_key, &mut buf); diff --git a/tokio-postgres/src/cancel_token.rs b/tokio-postgres/src/cancel_token.rs index c925ce0ca..1652bec72 100644 --- a/tokio-postgres/src/cancel_token.rs +++ b/tokio-postgres/src/cancel_token.rs @@ -1,4 +1,4 @@ -use crate::config::SslMode; +use 
crate::config::{SslMode, SslNegotiation}; use crate::tls::TlsConnect; #[cfg(feature = "runtime")] use crate::{cancel_query, client::SocketConfig, tls::MakeTlsConnect, Socket}; @@ -12,6 +12,7 @@ pub struct CancelToken { #[cfg(feature = "runtime")] pub(crate) socket_config: Option, pub(crate) ssl_mode: SslMode, + pub(crate) ssl_negotiation: SslNegotiation, pub(crate) process_id: i32, pub(crate) secret_key: i32, } @@ -37,6 +38,7 @@ impl CancelToken { cancel_query::cancel_query( self.socket_config.clone(), self.ssl_mode, + self.ssl_negotiation, tls, self.process_id, self.secret_key, @@ -54,6 +56,7 @@ impl CancelToken { cancel_query_raw::cancel_query_raw( stream, self.ssl_mode, + self.ssl_negotiation, tls, true, self.process_id, diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index eab65d30a..8fe8090a3 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,5 +1,5 @@ use crate::codec::{BackendMessages, FrontendMessage}; -use crate::config::SslMode; +use crate::config::{SslMode, SslNegotiation}; use crate::connection::{Request, RequestMessages}; use crate::copy_both::{CopyBothDuplex, CopyBothReceiver}; use crate::copy_out::CopyOutStream; @@ -21,19 +21,22 @@ use crate::{ use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; use futures_channel::mpsc; -use futures_util::{future, pin_mut, ready, Stream, StreamExt, TryStreamExt}; +use futures_util::{Stream, StreamExt, TryStreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; +use postgres_protocol::message::frontend; use postgres_types::BorrowToSql; use std::collections::HashMap; use std::fmt; +use std::future; #[cfg(feature = "runtime")] use std::net::IpAddr; #[cfg(feature = "runtime")] use std::path::PathBuf; +use std::pin::pin; use std::pin::Pin; use std::sync::Arc; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; #[cfg(feature = "runtime")] use std::time::Duration; use tokio::io::{AsyncRead, 
AsyncWrite}; @@ -225,6 +228,7 @@ pub struct Client { #[cfg(feature = "runtime")] socket_config: Option, ssl_mode: SslMode, + ssl_negotiation: SslNegotiation, process_id: i32, secret_key: i32, } @@ -233,6 +237,7 @@ impl Client { pub(crate) fn new( sender: mpsc::UnboundedSender, ssl_mode: SslMode, + ssl_negotiation: SslNegotiation, process_id: i32, secret_key: i32, ) -> Client { @@ -245,6 +250,7 @@ impl Client { #[cfg(feature = "runtime")] socket_config: None, ssl_mode, + ssl_negotiation, process_id, secret_key, } @@ -342,8 +348,7 @@ impl Client { where T: ?Sized + ToStatement, { - let stream = self.query_raw(statement, slice_iter(params)).await?; - pin_mut!(stream); + let mut stream = pin!(self.query_raw(statement, slice_iter(params)).await?); let mut first = None; @@ -378,18 +383,18 @@ impl Client { /// /// ```no_run /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { - /// use futures_util::{pin_mut, TryStreamExt}; + /// use std::pin::pin; + /// use futures_util::TryStreamExt; /// /// let params: Vec = vec![ /// "first param".into(), /// "second param".into(), /// ]; - /// let mut it = client.query_raw( + /// let mut it = pin!(client.query_raw( /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", /// params, - /// ).await?; + /// ).await?); /// - /// pin_mut!(it); /// while let Some(row) = it.try_next().await? 
{ /// let foo: i32 = row.get("foo"); /// println!("foo: {}", foo); @@ -444,19 +449,19 @@ impl Client { /// /// ```no_run /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { - /// use futures_util::{pin_mut, TryStreamExt}; + /// use std::pin::pin; + /// use futures_util::{TryStreamExt}; /// use tokio_postgres::types::Type; /// /// let params: Vec<(String, Type)> = vec![ /// ("first param".into(), Type::TEXT), /// ("second param".into(), Type::TEXT), /// ]; - /// let mut it = client.query_typed_raw( + /// let mut it = pin!(client.query_typed_raw( /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", /// params, - /// ).await?; + /// ).await?); /// - /// pin_mut!(it); /// while let Some(row) = it.try_next().await? { /// let foo: i32 = row.get("foo"); /// println!("foo: {}", foo); @@ -577,7 +582,20 @@ impl Client { self.simple_query_raw(query).await?.try_collect().await } - pub(crate) async fn simple_query_raw(&self, query: &str) -> Result { + /// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows as a stream. + /// + /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that + /// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings, + /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a list of the + /// rows, this method returns a stream of an enum which indicates either the completion of one of the commands, + /// or a row of data. This preserves the framing between the separate statements in the request. + /// + /// # Warning + /// + /// Prepared statements should be used for any query which contains user-specified data, as they provide the + /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass + /// them to this method!
+ pub async fn simple_query_raw(&self, query: &str) -> Result { simple_query::simple_query(self.inner(), query).await } @@ -595,6 +613,12 @@ impl Client { simple_query::batch_execute(self.inner(), query).await } + /// Check that the connection is alive and wait for the confirmation. + pub async fn check_connection(&self) -> Result<(), Error> { + // sync is a very quick message to test the connection health. + query::sync(self.inner()).await + } + /// Begins a new database transaction. /// /// The transaction will roll back by default - use the `commit` method to commit it. @@ -622,6 +646,7 @@ impl Client { #[cfg(feature = "runtime")] socket_config: self.socket_config.clone(), ssl_mode: self.ssl_mode, + ssl_negotiation: self.ssl_negotiation, process_id: self.process_id, secret_key: self.secret_key, } @@ -669,6 +694,21 @@ impl Client { self.inner.sender.is_closed() } + #[doc(hidden)] + pub fn __private_api_rollback(&self, name: Option<&str>) { + let buf = self.inner().with_buf(|buf| { + if let Some(name) = name { + frontend::query(&format!("ROLLBACK TO {}", name), buf).unwrap(); + } else { + frontend::query("ROLLBACK", buf).unwrap(); + } + buf.split().freeze() + }); + let _ = self + .inner() + .send(RequestMessages::Single(FrontendMessage::Raw(buf))); + } + #[doc(hidden)] pub fn __private_api_close(&mut self) { self.inner.sender.close_channel() diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index e94eac459..6fad68d63 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1,5 +1,7 @@ //! Connection configuration. 
+#![allow(clippy::doc_overindented_list_items)] + #[cfg(feature = "runtime")] use crate::connect::connect; use crate::connect_raw::connect_raw; @@ -57,6 +59,20 @@ pub enum SslMode { VerifyFull, } +/// TLS negotiation configuration +/// +/// See more information at +/// <https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNECT-SSLNEGOTIATION> +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +#[non_exhaustive] +pub enum SslNegotiation { + /// Use PostgreSQL SslRequest for Ssl negotiation + #[default] + Postgres, + /// Start Ssl handshake without negotiation, only works for PostgreSQL 17+ + Direct, +} + /// Channel binding configuration. #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[non_exhaustive] @@ -135,6 +151,15 @@ pub enum Host { /// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting /// with the `connect` method. +/// * `sslnegotiation` - TLS negotiation method. If set to `direct`, the client +/// will perform direct TLS handshake; this only works for PostgreSQL 17 and +/// newer. +/// Note that you will need to set up the ALPN of the TLS client configuration +/// to `postgresql` when using direct TLS. If you are using postgres_openssl +/// as the TLS backend, a `postgres_openssl::set_postgresql_alpn` helper is +/// provided for that. +/// If set to `postgres`, the default value, it follows the original postgres +/// wire protocol to perform the negotiation. /// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, /// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses.
/// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, @@ -184,7 +209,7 @@ pub enum Host { /// ``` /// /// ```not_rust -/// host=/var/lib/postgresql,localhost port=1234 user=postgres password='password with spaces' +/// host=/var/run/postgresql,localhost port=1234 user=postgres password='password with spaces' /// ``` /// /// ```not_rust @@ -209,7 +234,7 @@ pub enum Host { /// ``` /// /// ```not_rust -/// postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10 +/// postgresql://user:password@%2Fvar%2Frun%2Fpostgresql/mydb?connect_timeout=10 /// ``` /// /// ```not_rust @@ -217,7 +242,7 @@ pub enum Host { /// ``` /// /// ```not_rust -/// postgresql:///mydb?user=user&host=/var/lib/postgresql +/// postgresql:///mydb?user=user&host=/var/run/postgresql /// ``` #[derive(Clone, PartialEq, Eq)] pub struct Config { @@ -230,6 +255,7 @@ pub struct Config { pub(crate) ssl_key: Option>, pub(crate) ssl_mode: SslMode, pub(crate) ssl_root_cert: Option>, + pub(crate) ssl_negotiation: SslNegotiation, pub(crate) host: Vec, pub(crate) hostaddr: Vec, pub(crate) port: Vec, @@ -263,6 +289,7 @@ impl Config { ssl_key: None, ssl_mode: SslMode::Prefer, ssl_root_cert: None, + ssl_negotiation: SslNegotiation::Postgres, host: vec![], hostaddr: vec![], port: vec![], @@ -401,6 +428,19 @@ impl Config { self.ssl_root_cert.as_deref() } + /// Sets the SSL negotiation method. + /// + /// Defaults to `postgres`. + pub fn ssl_negotiation(&mut self, ssl_negotiation: SslNegotiation) -> &mut Config { + self.ssl_negotiation = ssl_negotiation; + self + } + + /// Gets the SSL negotiation method. + pub fn get_ssl_negotiation(&self) -> SslNegotiation { + self.ssl_negotiation + } + /// Adds a host to the configuration. /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. 
On Unix @@ -677,6 +717,18 @@ impl Config { "sslrootcert_inline" => { self.ssl_root_cert(value.as_bytes()); } + "sslnegotiation" => { + let mode = match value { + "postgres" => SslNegotiation::Postgres, + "direct" => SslNegotiation::Direct, + _ => { + return Err(Error::config_parse(Box::new(InvalidValue( + "sslnegotiation", + )))) + } + }; + self.ssl_negotiation(mode); + } "host" => { for host in value.split(',') { self.host(host); @@ -886,6 +938,7 @@ impl fmt::Debug for Config { .field("target_session_attrs", &self.target_session_attrs) .field("channel_binding", &self.channel_binding) .field("replication", &self.replication_mode) + .field("load_balance_hosts", &self.load_balance_hosts) .finish() } } @@ -961,10 +1014,8 @@ impl<'a> Parser<'a> { match self.it.next() { Some((_, c)) if c == target => Ok(()), Some((i, c)) => { - let m = format!( - "unexpected character at byte {}: expected `{}` but got `{}`", - i, target, c - ); + let m = + format!("unexpected character at byte {i}: expected `{target}` but got `{c}`"); Err(Error::config_parse(m.into())) } None => Err(Error::config_parse("unexpected EOF".into())), diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 8189cb91c..67c6e4baa 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -4,8 +4,10 @@ use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; use crate::tls::MakeTlsConnect; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; -use futures_util::{future, pin_mut, Future, FutureExt, Stream}; +use futures_util::{FutureExt, Stream}; use rand::seq::SliceRandom; +use std::future::{self, Future}; +use std::pin::pin; use std::task::Poll; use std::{cmp, io}; use tokio::net; @@ -44,7 +46,7 @@ where let mut indices = (0..num_hosts).collect::>(); if config.load_balance_hosts == LoadBalanceHosts::Random { - indices.shuffle(&mut rand::thread_rng()); + indices.shuffle(&mut rand::rng()); } let mut error = None; @@ -101,7 
+103,7 @@ where .collect::>(); if config.load_balance_hosts == LoadBalanceHosts::Random { - addrs.shuffle(&mut rand::thread_rng()); + addrs.shuffle(&mut rand::rng()); } let mut last_err = None; @@ -161,18 +163,18 @@ where let (mut client, mut connection) = connect_raw(socket, tls, has_hostname, config).await?; if config.target_session_attrs != TargetSessionAttrs::Any { - let rows = client.simple_query_raw("SHOW transaction_read_only"); - pin_mut!(rows); + let mut rows = pin!(client.simple_query_raw("SHOW transaction_read_only")); - let rows = future::poll_fn(|cx| { - if connection.poll_unpin(cx)?.is_ready() { - return Poll::Ready(Err(Error::closed())); - } + let mut rows = pin!( + future::poll_fn(|cx| { + if connection.poll_unpin(cx)?.is_ready() { + return Poll::Ready(Err(Error::closed())); + } - rows.as_mut().poll(cx) - }) - .await?; - pin_mut!(rows); + rows.as_mut().poll(cx) + }) + .await? + ); loop { let next = future::poll_fn(|cx| { diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 8edf45937..621d6abc6 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -7,7 +7,7 @@ use crate::{Client, Connection, Error}; use bytes::BytesMut; use fallible_iterator::FallibleIterator; use futures_channel::mpsc; -use futures_util::{ready, Sink, SinkExt, Stream, TryStreamExt}; +use futures_util::{Sink, SinkExt, Stream, TryStreamExt}; use postgres_protocol::authentication; use postgres_protocol::authentication::sasl; use postgres_protocol::authentication::sasl::ScramSha256; @@ -17,7 +17,7 @@ use std::borrow::Cow; use std::collections::{HashMap, VecDeque}; use std::io; use std::pin::Pin; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::codec::Framed; @@ -89,7 +89,14 @@ where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - let stream = connect_tls(stream, config.ssl_mode, tls, has_hostname).await?; + let stream = 
connect_tls( + stream, + config.ssl_mode, + config.ssl_negotiation, + tls, + has_hostname, + ) + .await?; let mut stream = StartupStream { inner: Framed::new(stream, PostgresCodec), @@ -107,7 +114,13 @@ where let (process_id, secret_key, parameters) = read_info(&mut stream).await?; let (sender, receiver) = mpsc::unbounded(); - let client = Client::new(sender, config.ssl_mode, process_id, secret_key); + let client = Client::new( + sender, + config.ssl_mode, + config.ssl_negotiation, + process_id, + secret_key, + ); let connection = Connection::new(stream.inner, stream.delayed, parameters, receiver); Ok((client, connection)) diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index f27131178..0001cef19 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -27,10 +27,11 @@ pub(crate) async fn connect_socket( stream.set_nodelay(true).map_err(Error::connect)?; let sock_ref = SockRef::from(&stream); + #[cfg(target_os = "linux")] - { + if let Some(tcp_user_timeout) = tcp_user_timeout { sock_ref - .set_tcp_user_timeout(tcp_user_timeout) + .set_tcp_user_timeout(Some(tcp_user_timeout)) .map_err(Error::connect)?; } @@ -44,7 +45,7 @@ pub(crate) async fn connect_socket( } #[cfg(unix)] Addr::Unix(dir) => { - let path = dir.join(format!(".s.PGSQL.{}", port)); + let path = dir.join(format!(".s.PGSQL.{port}")); let socket = connect_with_timeout(UnixStream::connect(path), connect_timeout).await?; Ok(Socket::new_unix(socket)) } diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs index 41b319c2b..f614b43dc 100644 --- a/tokio-postgres/src/connect_tls.rs +++ b/tokio-postgres/src/connect_tls.rs @@ -1,4 +1,4 @@ -use crate::config::SslMode; +use crate::config::{SslMode, SslNegotiation}; use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::private::ForcePrivateApi; use crate::tls::TlsConnect; @@ -10,6 +10,7 @@ use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, 
AsyncWriteExt}; pub async fn connect_tls( mut stream: S, mode: SslMode, + negotiation: SslNegotiation, tls: T, has_hostname: bool, ) -> Result, Error> @@ -22,22 +23,28 @@ where SslMode::Prefer if !tls.can_connect(ForcePrivateApi) => { return Ok(MaybeTlsStream::Raw(stream)) } + SslMode::Prefer if negotiation == SslNegotiation::Direct => return Err(Error::tls( + "weak sslmode \"prefer\" may not be used with sslnegotiation=direct (use \"require\")" + .into(), + )), SslMode::Prefer | SslMode::Require | SslMode::VerifyCa | SslMode::VerifyFull => {} } - let mut buf = BytesMut::new(); - frontend::ssl_request(&mut buf); - stream.write_all(&buf).await.map_err(Error::io)?; + if negotiation == SslNegotiation::Postgres { + let mut buf = BytesMut::new(); + frontend::ssl_request(&mut buf); + stream.write_all(&buf).await.map_err(Error::io)?; - let mut buf = [0]; - stream.read_exact(&mut buf).await.map_err(Error::io)?; + let mut buf = [0]; + stream.read_exact(&mut buf).await.map_err(Error::io)?; - if buf[0] != b'S' { - match mode { - SslMode::Require | SslMode::VerifyCa | SslMode::VerifyFull => { - return Err(Error::tls("server does not support TLS".into())) + if buf[0] != b'S' { + match mode { + SslMode::Require | SslMode::VerifyCa | SslMode::VerifyFull => { + return Err(Error::tls("server does not support TLS".into())) + } + SslMode::Disable | SslMode::Prefer => return Ok(MaybeTlsStream::Raw(stream)), } - SslMode::Disable | SslMode::Prefer => return Ok(MaybeTlsStream::Raw(stream)), } } diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index a3449f88b..f066ba10f 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -7,14 +7,14 @@ use crate::{AsyncMessage, Error, Notification}; use bytes::BytesMut; use fallible_iterator::FallibleIterator; use futures_channel::mpsc; -use futures_util::{ready, stream::FusedStream, Sink, Stream, StreamExt}; +use futures_util::{stream::FusedStream, Sink, Stream, StreamExt}; use 
log::{info, trace}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::collections::{HashMap, VecDeque}; use std::future::Future; use std::pin::Pin; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::codec::Framed; @@ -318,14 +318,7 @@ where self.parameters.get(name).map(|s| &**s) } - /// Polls for asynchronous messages from the server. - /// - /// The server can send notices as well as notifications asynchronously to the client. Applications that wish to - /// examine those messages should use this method to drive the connection rather than its `Future` implementation. - /// - /// Return values of `None` or `Some(Err(_))` are "terminal"; callers should not invoke this method again after - /// receiving one of those values. - pub fn poll_message( + fn poll_message_inner( &mut self, cx: &mut Context<'_>, ) -> Poll>> { @@ -343,6 +336,26 @@ where }, } } + + /// Polls for asynchronous messages from the server. + /// + /// The server can send notices as well as notifications asynchronously to the client. Applications that wish to + /// examine those messages should use this method to drive the connection rather than its `Future` implementation. + /// + /// Return values of `None` or `Some(Err(_))` are "terminal"; callers should not invoke this method again after + /// receiving one of those values. 
+ pub fn poll_message( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { + match self.poll_message_inner(cx) { + nominal @ (Poll::Pending | Poll::Ready(Some(Ok(_)))) => nominal, + terminal @ (Poll::Ready(None) | Poll::Ready(Some(Err(_)))) => { + self.receiver.close(); + terminal + } + } + } } impl Future for Connection diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index b3fdba84a..8cdb817ac 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -5,15 +5,16 @@ use crate::query::extract_row_affected; use crate::{query, simple_query, slice_iter, Error, Statement}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use futures_channel::mpsc; -use futures_util::{future, ready, Sink, SinkExt, Stream, StreamExt}; +use futures_util::{Sink, SinkExt, Stream, StreamExt}; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use postgres_protocol::message::frontend::CopyData; -use std::marker::{PhantomData, PhantomPinned}; +use std::future; +use std::marker::PhantomData; use std::pin::Pin; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; enum CopyInMessage { Message(FrontendMessage), @@ -73,14 +74,13 @@ pin_project! { /// /// The copy *must* be explicitly completed via the `Sink::close` or `finish` methods. If it is /// not, the copy will be aborted. 
+ #[project(!Unpin)] pub struct CopyInSink { #[pin] sender: mpsc::Sender, responses: Responses, buf: BytesMut, state: SinkState, - #[pin] - _p: PhantomPinned, _p2: PhantomData, } } @@ -218,7 +218,6 @@ where responses, buf: BytesMut::new(), state: SinkState::Active, - _p: PhantomPinned, _p2: PhantomData, }) } diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 981f9365e..873e9eee0 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -3,23 +3,19 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::{query, simple_query, slice_iter, Error, Statement}; use bytes::Bytes; -use futures_util::{ready, Stream}; +use futures_util::Stream; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; -use std::marker::PhantomPinned; use std::pin::Pin; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; pub async fn copy_out_simple(client: &InnerClient, query: &str) -> Result { debug!("executing copy out query {}", query); let buf = simple_query::encode(client, query)?; let responses = start(client, buf, true).await?; - Ok(CopyOutStream { - responses, - _p: PhantomPinned, - }) + Ok(CopyOutStream { responses }) } pub async fn copy_out(client: &InnerClient, statement: Statement) -> Result { @@ -27,10 +23,7 @@ pub async fn copy_out(client: &InnerClient, statement: Statement) -> Result Result { @@ -53,10 +46,9 @@ async fn start(client: &InnerClient, buf: Bytes, simple: bool) -> Result) -> fmt::Result { write!(fmt, "{}: {}", self.severity, self.message)?; if let Some(detail) = &self.detail { - write!(fmt, "\nDETAIL: {}", detail)?; + write!(fmt, "\nDETAIL: {detail}")?; } if let Some(hint) = &self.hint { - write!(fmt, "\nHINT: {}", hint)?; + write!(fmt, "\nHINT: {hint}")?; } Ok(()) } @@ -380,31 +380,27 @@ impl fmt::Debug for Error { impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match 
&self.0.kind { - Kind::Io => fmt.write_str("error communicating with the server")?, - Kind::UnexpectedMessage => fmt.write_str("unexpected message from server")?, - Kind::Tls => fmt.write_str("error performing TLS handshake")?, - Kind::ToSql(idx) => write!(fmt, "error serializing parameter {}", idx)?, - Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, - Kind::Column(column) => write!(fmt, "invalid column `{}`", column)?, + Kind::Io => fmt.write_str("error communicating with the server"), + Kind::UnexpectedMessage => fmt.write_str("unexpected message from server"), + Kind::Tls => fmt.write_str("error performing TLS handshake"), + Kind::ToSql(idx) => write!(fmt, "error serializing parameter {idx}"), + Kind::FromSql(idx) => write!(fmt, "error deserializing column {idx}"), + Kind::Column(column) => write!(fmt, "invalid column `{column}`"), Kind::Parameters(real, expected) => { - write!(fmt, "expected {expected} parameters but got {real}")? + write!(fmt, "expected {expected} parameters but got {real}") } - Kind::Closed => fmt.write_str("connection closed")?, - Kind::Db => fmt.write_str("db error")?, - Kind::Parse => fmt.write_str("error parsing response from server")?, - Kind::Encode => fmt.write_str("error encoding message to server")?, - Kind::Authentication => fmt.write_str("authentication error")?, - Kind::ConfigParse => fmt.write_str("invalid connection string")?, - Kind::Config => fmt.write_str("invalid configuration")?, - Kind::RowCount => fmt.write_str("query returned an unexpected number of rows")?, + Kind::Closed => fmt.write_str("connection closed"), + Kind::Db => fmt.write_str("db error"), + Kind::Parse => fmt.write_str("error parsing response from server"), + Kind::Encode => fmt.write_str("error encoding message to server"), + Kind::Authentication => fmt.write_str("authentication error"), + Kind::ConfigParse => fmt.write_str("invalid connection string"), + Kind::Config => fmt.write_str("invalid configuration"), + Kind::RowCount => 
fmt.write_str("query returned an unexpected number of rows"), #[cfg(feature = "runtime")] - Kind::Connect => fmt.write_str("error connecting to server")?, - Kind::Timeout => fmt.write_str("timeout waiting for server")?, - }; - if let Some(ref cause) = self.0.cause { - write!(fmt, ": {}", cause)?; + Kind::Connect => fmt.write_str("error connecting to server"), + Kind::Timeout => fmt.write_str("timeout waiting for server"), } - Ok(()) } } diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 13a1d75f9..8c3c1cfe0 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -1350,321 +1350,321 @@ enum Inner { #[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { - key: 12913932095322966823, + key: 2689841203009609170, disps: &[ - (0, 24), - (0, 12), - (0, 74), - (0, 109), + (0, 178), + (0, 2), + (0, 45), + (1, 10), + (3, 12), + (0, 86), (0, 11), - (0, 9), + (0, 159), + (0, 129), (0, 0), - (4, 38), - (3, 155), - (0, 6), - (1, 242), - (0, 66), - (0, 53), - (5, 180), - (3, 221), - (7, 230), - (0, 125), - (1, 46), - (0, 11), - (1, 2), - (0, 5), - (0, 13), - (0, 171), - (0, 15), - (0, 4), - (0, 22), - (1, 85), - (0, 75), - (2, 0), - (1, 25), - (7, 47), - (0, 45), - (0, 35), - (0, 7), - (7, 124), + (0, 110), + (0, 87), + (0, 1), (0, 0), - (14, 104), - (1, 183), - (61, 50), - (3, 76), (0, 12), - (0, 7), - (4, 189), + (0, 17), + (2, 258), + (9, 119), + (0, 10), + (0, 58), + (0, 104), + (0, 89), + (0, 104), + (0, 23), + (0, 135), + (0, 5), + (0, 0), (0, 1), - (64, 102), + (0, 8), + (0, 16), + (17, 61), + (4, 220), + (0, 245), + (0, 28), + (9, 28), + (39, 31), + (2, 79), + (0, 113), + (0, 203), + (0, 2), + (0, 218), + (1, 101), + (18, 165), + (0, 192), (0, 0), - (16, 192), - (24, 19), - (0, 5), - (0, 87), - (0, 89), - (0, 14), + (3, 78), + (0, 86), + (0, 12), + (7, 142), + (0, 0), + (10, 234), + (15, 27), ], entries: &[ - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - 
("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), - ("22000", SqlState::DATA_EXCEPTION), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), - ("2200N", SqlState::INVALID_XML_CONTENT), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), - ("28P01", SqlState::INVALID_PASSWORD), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), - ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("F0000", SqlState::CONFIG_FILE_ERROR), - ("42P18", SqlState::INDETERMINATE_DATATYPE), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), - ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("42710", SqlState::DUPLICATE_OBJECT), - ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("22022", SqlState::INDICATOR_OVERFLOW), - ("55006", SqlState::OBJECT_IN_USE), - ("53200", SqlState::OUT_OF_MEMORY), - ("22012", SqlState::DIVISION_BY_ZERO), - ("P0002", SqlState::NO_DATA_FOUND), - ("XX001", SqlState::DATA_CORRUPTED), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), - ("42804", SqlState::DATATYPE_MISMATCH), - ("42803", SqlState::GROUPING_ERROR), - ("02001", 
SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), + ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), + ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), + ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), + ("22035", SqlState::NO_SQL_JSON_ITEM), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("42P06", SqlState::DUPLICATE_SCHEMA), + ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), + ("57P01", SqlState::ADMIN_SHUTDOWN), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("42P19", SqlState::INVALID_RECURSION), + ("57P02", SqlState::CRASH_SHUTDOWN), + ("22012", SqlState::DIVISION_BY_ZERO), + ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), + ("22032", SqlState::INVALID_JSON_TEXT), ("42723", SqlState::DUPLICATE_FUNCTION), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("22P01", SqlState::FLOATING_POINT_EXCEPTION), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("42P02", SqlState::UNDEFINED_PARAMETER), + ("42804", SqlState::DATATYPE_MISMATCH), + ("42P09", SqlState::AMBIGUOUS_ALIAS), ("23505", SqlState::UNIQUE_VIOLATION), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), ("23P01", SqlState::EXCLUSION_VIOLATION), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("2202H", 
SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("P0000", SqlState::PLPGSQL_ERROR), + ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("23514", SqlState::CHECK_VIOLATION), + ("2200S", SqlState::INVALID_XML_COMMENT), + ("HV00K", SqlState::FDW_REPLY_HANDLE), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), + ("42P08", SqlState::AMBIGUOUS_PARAMETER), ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), - ("42712", SqlState::DUPLICATE_ALIAS), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("58030", SqlState::IO_ERROR), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), - ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("20000", SqlState::CASE_NOT_FOUND), - ("2203G", SqlState::SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE), - ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), - ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("22000", SqlState::DATA_EXCEPTION), + ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("42704", SqlState::UNDEFINED_OBJECT), + ("08006", SqlState::CONNECTION_FAILURE), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), + ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), + ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), + ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + 
("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("40000", SqlState::TRANSACTION_ROLLBACK), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("42701", SqlState::DUPLICATE_COLUMN), + ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("P0002", SqlState::NO_DATA_FOUND), ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), - ("P0004", SqlState::ASSERT_FAILURE), - ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("0L000", SqlState::INVALID_GRANTOR), - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("22031", SqlState::INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), + ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), + ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), + ("0F000", SqlState::LOCATOR_EXCEPTION), + ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("58P02", SqlState::DUPLICATE_FILE), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("54001", SqlState::STATEMENT_TOO_COMPLEX), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("42939", SqlState::RESERVED_NAME), + ("42P16", SqlState::INVALID_TABLE_DEFINITION), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("42P20", SqlState::WINDOWING_ERROR), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("02000", SqlState::NO_DATA), - ("54011", SqlState::TOO_MANY_COLUMNS), - ("HV004", 
SqlState::FDW_INVALID_DATA_TYPE), - ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("42701", SqlState::DUPLICATE_COLUMN), - ("08P01", SqlState::PROTOCOL_VIOLATION), - ("42622", SqlState::NAME_TOO_LONG), - ("P0003", SqlState::TOO_MANY_ROWS), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), - ("42P03", SqlState::DUPLICATE_CURSOR), - ("23001", SqlState::RESTRICT_VIOLATION), - ("57000", SqlState::OPERATOR_INTERVENTION), - ("22027", SqlState::TRIM_ERROR), - ("42P12", SqlState::INVALID_DATABASE_DEFINITION), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("428C9", SqlState::GENERATED_ALWAYS), - ("2200S", SqlState::INVALID_XML_COMMENT), + ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), + ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("42703", SqlState::UNDEFINED_COLUMN), + ("P0004", SqlState::ASSERT_FAILURE), + ("0L000", SqlState::INVALID_GRANTOR), ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("22007", SqlState::INVALID_DATETIME_FORMAT), + ("54001", SqlState::STATEMENT_TOO_COMPLEX), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("42P22", SqlState::INDETERMINATE_COLLATION), + ("0F001", SqlState::L_E_INVALID_SPECIFICATION), + ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), + ("23502", SqlState::NOT_NULL_VIOLATION), + ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), - ("53100", SqlState::DISK_FULL), - ("42601", SqlState::SYNTAX_ERROR), - ("23000", 
SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), - ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("42P21", SqlState::COLLATION_MISMATCH), + ("42P12", SqlState::INVALID_DATABASE_DEFINITION), + ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), + ("HV00P", SqlState::FDW_NO_SCHEMAS), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), + ("42P07", SqlState::DUPLICATE_TABLE), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("0A000", SqlState::FEATURE_NOT_SUPPORTED), + ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + ("42611", SqlState::INVALID_COLUMN_DEFINITION), + ("3B001", SqlState::S_E_INVALID_SPECIFICATION), + ("XX002", SqlState::INDEX_CORRUPTED), + ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), ("01000", SqlState::WARNING), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("57P01", SqlState::ADMIN_SHUTDOWN), - ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), - ("00000", SqlState::SUCCESSFUL_COMPLETION), + ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("08000", SqlState::CONNECTION_EXCEPTION), + ("42602", SqlState::INVALID_NAME), + ("3B000", SqlState::SAVEPOINT_EXCEPTION), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("42P20", SqlState::WINDOWING_ERROR), ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("42P01", SqlState::UNDEFINED_TABLE), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("XX002", SqlState::INDEX_CORRUPTED), - ("42P16", SqlState::INVALID_TABLE_DEFINITION), + ("58000", SqlState::SYSTEM_ERROR), + ("55006", SqlState::OBJECT_IN_USE), + ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("54011", 
SqlState::TOO_MANY_COLUMNS), ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("57000", SqlState::OPERATOR_INTERVENTION), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("428C9", SqlState::GENERATED_ALWAYS), + ("2200N", SqlState::INVALID_XML_CONTENT), + ("57014", SqlState::QUERY_CANCELED), + ("58P02", SqlState::DUPLICATE_FILE), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), + ("22P04", SqlState::BAD_COPY_FILE_FORMAT), + ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("P0001", SqlState::RAISE_EXCEPTION), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ("57P02", SqlState::CRASH_SHUTDOWN), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("22032", SqlState::INVALID_JSON_TEXT), - ("2202E", SqlState::ARRAY_ELEMENT_ERROR), - ("42P19", SqlState::INVALID_RECURSION), - ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), - ("XX000", SqlState::INTERNAL_ERROR), - ("08006", SqlState::CONNECTION_FAILURE), - ("57P04", SqlState::DATABASE_DROPPED), - ("42P07", SqlState::DUPLICATE_TABLE), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("22035", SqlState::NO_SQL_JSON_ITEM), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("P0000", SqlState::PLPGSQL_ERROR), + ("42883", SqlState::UNDEFINED_FUNCTION), + 
("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), + ("23001", SqlState::RESTRICT_VIOLATION), + ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("42P21", SqlState::COLLATION_MISMATCH), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("22011", SqlState::SUBSTRING_ERROR), - ("42602", SqlState::INVALID_NAME), - ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), - ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("58P01", SqlState::UNDEFINED_FILE), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("42703", SqlState::UNDEFINED_COLUMN), - ("57P05", SqlState::IDLE_SESSION_TIMEOUT), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), + ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), + ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("22022", SqlState::INDICATOR_OVERFLOW), + ("XX000", SqlState::INTERNAL_ERROR), + ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), + ("42P01", SqlState::UNDEFINED_TABLE), + ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), + ("22027", SqlState::TRIM_ERROR), + ("21000", SqlState::CARDINALITY_VIOLATION), + ("HV014", SqlState::FDW_TOO_MANY_HANDLES), ("57P03", SqlState::CANNOT_CONNECT_NOW), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), + ("F0001", SqlState::LOCK_FILE_EXISTS), + ("42P04", SqlState::DUPLICATE_DATABASE), + ("2F004", 
SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("22011", SqlState::SUBSTRING_ERROR), + ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), + ("25000", SqlState::INVALID_TRANSACTION_STATE), ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), + ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), + ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("42P22", SqlState::INDETERMINATE_COLLATION), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), - ("23514", SqlState::CHECK_VIOLATION), - ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), + ("42846", SqlState::CANNOT_COERCE), + ("22005", SqlState::ERROR_IN_ASSIGNMENT), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("0Z002", 
SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("42830", SqlState::INVALID_FOREIGN_KEY), + ("42803", SqlState::GROUPING_ERROR), + ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), - ("HV000", SqlState::FDW_ERROR), - ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("42710", SqlState::DUPLICATE_OBJECT), + ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("F0000", SqlState::CONFIG_FILE_ERROR), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), + ("2202E", SqlState::ARRAY_ELEMENT_ERROR), + ("2203G", SqlState::SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE), + ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), + ("42712", SqlState::DUPLICATE_ALIAS), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("22008", SqlState::DATETIME_FIELD_OVERFLOW), + ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("42601", SqlState::SYNTAX_ERROR), ("3D000", SqlState::INVALID_CATALOG_NAME), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("28P01", SqlState::INVALID_PASSWORD), + ("HV006", 
SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("42939", SqlState::RESERVED_NAME), - ("58000", SqlState::SYSTEM_ERROR), - ("2200M", SqlState::INVALID_XML_DOCUMENT), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), - ("57014", SqlState::QUERY_CANCELED), - ("23502", SqlState::NOT_NULL_VIOLATION), + ("02000", SqlState::NO_DATA), + ("53200", SqlState::OUT_OF_MEMORY), ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), - ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), - ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("XX001", SqlState::DATA_CORRUPTED), + ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), + ("20000", SqlState::CASE_NOT_FOUND), + ("54023", SqlState::TOO_MANY_ARGUMENTS), ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), - ("42846", SqlState::CANNOT_COERCE), - ("42P04", SqlState::DUPLICATE_DATABASE), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + ("3F000", SqlState::INVALID_SCHEMA_NAME), + ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), 
- ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), - ("42704", SqlState::UNDEFINED_OBJECT), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), + ("2200D", SqlState::INVALID_ESCAPE_OCTET), + ("24000", SqlState::INVALID_CURSOR_STATE), ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("53100", SqlState::DISK_FULL), + ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), + ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), + ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), + ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), + ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), + ("00000", SqlState::SUCCESSFUL_COMPLETION), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), + ("P0001", SqlState::RAISE_EXCEPTION), + ("58P01", SqlState::UNDEFINED_FILE), + ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("57P04", SqlState::DATABASE_DROPPED), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("HV000", SqlState::FDW_ERROR), + ("42702", SqlState::AMBIGUOUS_COLUMN), + ("42P03", SqlState::DUPLICATE_CURSOR), + ("42622", SqlState::NAME_TOO_LONG), + ("58030", SqlState::IO_ERROR), + ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("57P05", SqlState::IDLE_SESSION_TIMEOUT), + ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("42809", SqlState::WRONG_OBJECT_TYPE), + ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), + ("22031", 
SqlState::INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), + ("P0003", SqlState::TOO_MANY_ROWS), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), + ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), + ("08P01", SqlState::PROTOCOL_VIOLATION), + ("HV00B", SqlState::FDW_INVALID_HANDLE), + ("34000", SqlState::INVALID_CURSOR_NAME), ], }; diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 6e7dffeb1..dcda147b5 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -80,7 +80,7 @@ pub trait GenericClient: private::Sealed { ) -> Result; /// Like [`Client::transaction`]. - async fn transaction(&mut self) -> Result, Error>; + async fn transaction<'a>(&'a mut self) -> Result, Error>; /// Like [`Client::batch_execute`]. async fn batch_execute(&self, query: &str) -> Result<(), Error>; @@ -180,7 +180,7 @@ impl GenericClient for Client { self.prepare_typed(query, parameter_types).await } - async fn transaction(&mut self) -> Result, Error> { + async fn transaction<'a>(&'a mut self) -> Result, Error> { self.transaction().await } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index cde9df841..9de3a8fd1 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -106,11 +106,16 @@ //! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 1.0 with the features `net` and `time` | yes | //! | `array-impls` | Enables `ToSql` and `FromSql` trait impls for arrays | - | no | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | +//! | `with-bit-vec-0_7` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.7 | no | +//! | `with-bit-vec-0_8` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.8 | no | //! 
| `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | +//! | `with-cidr-0_3` | Enable support for the `cidr` crate. | [cidr](https://crates.io/crates/cidr) 0.3 | no | //! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. This is deprecated and will be removed. | [eui48](https://crates.io/crates/eui48) 0.4 | no | //! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | //! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | +//! | `with-jiff-0_1` | Enable support for the 0.1 version of the `jiff` crate. | [jiff](https://crates.io/crates/jiff/0.1.0) 0.1 | no | +//! | `with-jiff-0_2` | Enable support for the 0.2 version of the `jiff` crate. | [jiff](https://crates.io/crates/jiff/0.2.16) 0.2 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-uuid-1` | Enable support for the `uuid` crate. 
| [uuid](https://crates.io/crates/uuid) 1.0 | no | @@ -142,6 +147,7 @@ pub use crate::to_statement::ToStatement; pub use crate::transaction::Transaction; pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder}; use crate::types::ToSql; +pub use fallible_iterator; use std::sync::Arc; pub mod binary_copy; diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 1d9bacb16..cbbb8cdf1 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -7,12 +7,12 @@ use crate::{query, slice_iter}; use crate::{Column, Error, Statement}; use bytes::Bytes; use fallible_iterator::FallibleIterator; -use futures_util::{pin_mut, TryStreamExt}; +use futures_util::TryStreamExt; use log::debug; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::future::Future; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -118,9 +118,9 @@ fn prepare_rec<'a>( fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Result { if types.is_empty() { - debug!("preparing query {}: {}", name, query); + debug!("preparing query {name}: {query}"); } else { - debug!("preparing query {} with types {:?}: {}", name, types, query); + debug!("preparing query {name} with types {types:?}: {query}"); } client.with_buf(|buf| { @@ -142,8 +142,7 @@ pub(crate) async fn get_type(client: &Arc, oid: Oid) -> Result row, diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 3ab002871..765d470a7 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -6,21 +6,20 @@ use crate::types::{BorrowToSql, IsNull}; use crate::{Column, Error, Portal, Row, Statement}; use bytes::{Bytes, BytesMut}; use fallible_iterator::FallibleIterator; -use futures_util::{ready, Stream}; +use futures_util::Stream; use log::{debug, log_enabled, Level}; use pin_project_lite::pin_project; use 
postgres_protocol::message::backend::{CommandCompleteBody, Message}; use postgres_protocol::message::frontend; use postgres_types::Type; use std::fmt; -use std::marker::PhantomPinned; use std::pin::Pin; use std::sync::Arc; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; struct BorrowToSqlParamsDebug<'a, T>(&'a [T]); -impl<'a, T> fmt::Debug for BorrowToSqlParamsDebug<'a, T> +impl fmt::Debug for BorrowToSqlParamsDebug<'_, T> where T: BorrowToSql, { @@ -57,11 +56,10 @@ where statement, responses, rows_affected: None, - _p: PhantomPinned, }) } -pub async fn query_typed<'a, P, I>( +pub async fn query_typed( client: &Arc, query: &str, params: I, @@ -95,7 +93,6 @@ where statement: Statement::unnamed(vec![], vec![]), responses, rows_affected: None, - _p: PhantomPinned, }); } Message::RowDescription(row_description) => { @@ -115,7 +112,6 @@ where statement: Statement::unnamed(vec![], columns), responses, rows_affected: None, - _p: PhantomPinned, }); } _ => return Err(Error::unexpected_message()), @@ -140,7 +136,6 @@ pub async fn query_portal( statement: portal.statement().clone(), responses, rows_affected: None, - _p: PhantomPinned, }) } @@ -285,12 +280,11 @@ where pin_project! { /// A stream of table rows. + #[project(!Unpin)] pub struct RowStream { statement: Statement, responses: Responses, rows_affected: Option, - #[pin] - _p: PhantomPinned, } } @@ -323,3 +317,13 @@ impl RowStream { self.rows_affected } } + +pub async fn sync(client: &InnerClient) -> Result<(), Error> { + let buf = Bytes::from_static(b"S\0\0\0\x04"); + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + match responses.next().await? 
{ + Message::ReadyForQuery(_) => Ok(()), + _ => Err(Error::unexpected_message()), + } +} diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 767c26921..3726a5e85 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -79,9 +79,9 @@ impl RowIndex for str { } } -impl<'a, T> Sealed for &'a T where T: ?Sized + Sealed {} +impl Sealed for &T where T: ?Sized + Sealed {} -impl<'a, T> RowIndex for &'a T +impl RowIndex for &T where T: ?Sized + RowIndex, { @@ -184,6 +184,11 @@ impl Row { FromSql::from_sql_nullable(ty, self.col_buffer(idx)).map_err(|e| Error::from_sql(e, idx)) } + /// Returns the raw size of the row in bytes. + pub fn raw_size_bytes(&self) -> usize { + self.body.buffer_bytes().len() + } + /// Get the raw bytes for the column at the given index. fn col_buffer(&self, idx: usize) -> Option<&[u8]> { let range = self.ranges[idx].to_owned()?; diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index a26e43e6e..bf36fc17a 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -5,15 +5,14 @@ use crate::query::extract_row_affected; use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use bytes::Bytes; use fallible_iterator::FallibleIterator; -use futures_util::{ready, Stream}; +use futures_util::Stream; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; -use std::marker::PhantomPinned; use std::pin::Pin; use std::sync::Arc; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; /// Information about a column of a single query row. 
#[derive(Debug)] @@ -33,7 +32,7 @@ impl SimpleColumn { } pub async fn simple_query(client: &InnerClient, query: &str) -> Result { - debug!("executing simple query: {}", query); + debug!("executing simple query: {query}"); let buf = encode(client, query)?; let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; @@ -41,12 +40,11 @@ pub async fn simple_query(client: &InnerClient, query: &str) -> Result Result<(), Error> { - debug!("executing statement batch: {}", query); + debug!("executing statement batch: {query}"); let buf = encode(client, query)?; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; @@ -72,11 +70,10 @@ pub(crate) fn encode(client: &InnerClient, query: &str) -> Result pin_project! { /// A stream of simple query results. + #[project(!Unpin)] pub struct SimpleQueryStream { responses: Responses, columns: Option>, - #[pin] - _p: PhantomPinned, } } diff --git a/tokio-postgres/src/to_statement.rs b/tokio-postgres/src/to_statement.rs index 427f77dd7..7e1299272 100644 --- a/tokio-postgres/src/to_statement.rs +++ b/tokio-postgres/src/to_statement.rs @@ -11,7 +11,7 @@ mod private { Query(&'a str), } - impl<'a> ToStatementType<'a> { + impl ToStatementType<'_> { pub async fn into_statement(self, client: &Client) -> Result { match self { ToStatementType::Statement(s) => Ok(s.clone()), diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 17a50b60f..0186da06d 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -1,5 +1,3 @@ -use crate::codec::FrontendMessage; -use crate::connection::RequestMessages; use crate::copy_out::CopyOutStream; use crate::query::RowStream; #[cfg(feature = "runtime")] @@ -14,7 +12,6 @@ use crate::{ }; use bytes::Buf; use futures_util::TryStreamExt; -use postgres_protocol::message::frontend; use tokio::io::{AsyncRead, AsyncWrite}; /// A representation of a PostgreSQL database transaction. 
@@ -33,25 +30,14 @@ struct Savepoint { depth: u32, } -impl<'a> Drop for Transaction<'a> { +impl Drop for Transaction<'_> { fn drop(&mut self) { if self.done { return; } - let query = if let Some(sp) = self.savepoint.as_ref() { - format!("ROLLBACK TO {}", sp.name) - } else { - "ROLLBACK".to_string() - }; - let buf = self.client.inner().with_buf(|buf| { - frontend::query(&query, buf).unwrap(); - buf.split().freeze() - }); - let _ = self - .client - .inner() - .send(RequestMessages::Single(FrontendMessage::Raw(buf))); + let name = self.savepoint.as_ref().map(|sp| sp.name.as_str()); + self.client.__private_api_rollback(name); } } @@ -314,8 +300,8 @@ impl<'a> Transaction<'a> { async fn _savepoint(&mut self, name: Option) -> Result, Error> { let depth = self.savepoint.as_ref().map_or(0, |sp| sp.depth) + 1; - let name = name.unwrap_or_else(|| format!("sp_{}", depth)); - let query = format!("SAVEPOINT {}", name); + let name = name.unwrap_or_else(|| format!("sp_{depth}")); + let query = format!("SAVEPOINT {name}"); self.batch_execute(&query).await?; Ok(Transaction { diff --git a/tokio-postgres/src/transaction_builder.rs b/tokio-postgres/src/transaction_builder.rs index 93e9e9801..8dec56afc 100644 --- a/tokio-postgres/src/transaction_builder.rs +++ b/tokio-postgres/src/transaction_builder.rs @@ -1,6 +1,4 @@ -use postgres_protocol::message::frontend; - -use crate::{codec::FrontendMessage, connection::RequestMessages, Client, Error, Transaction}; +use crate::{Client, Error, Transaction}; /// The isolation level of a database transaction. 
#[derive(Debug, Copy, Clone)] @@ -113,20 +111,13 @@ impl<'a> TransactionBuilder<'a> { done: bool, } - impl<'a> Drop for RollbackIfNotDone<'a> { + impl Drop for RollbackIfNotDone<'_> { fn drop(&mut self) { if self.done { return; } - let buf = self.client.inner().with_buf(|buf| { - frontend::query("ROLLBACK", buf).unwrap(); - buf.split().freeze() - }); - let _ = self - .client - .inner() - .send(RequestMessages::Single(FrontendMessage::Raw(buf))); + self.client.__private_api_rollback(None); } } diff --git a/tokio-postgres/tests/test/binary_copy.rs b/tokio-postgres/tests/test/binary_copy.rs index 94b96ab85..5a6f0458d 100644 --- a/tokio-postgres/tests/test/binary_copy.rs +++ b/tokio-postgres/tests/test/binary_copy.rs @@ -1,5 +1,6 @@ use crate::connect; -use futures_util::{pin_mut, TryStreamExt}; +use futures_util::TryStreamExt; +use std::pin::pin; use tokio_postgres::binary_copy::{BinaryCopyInWriter, BinaryCopyOutStream}; use tokio_postgres::types::Type; @@ -16,8 +17,7 @@ async fn write_basic() { .copy_in("COPY foo (id, bar) FROM STDIN BINARY") .await .unwrap(); - let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]); - pin_mut!(writer); + let mut writer = pin!(BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT])); writer.as_mut().write(&[&1i32, &"foobar"]).await.unwrap(); writer .as_mut() @@ -50,13 +50,12 @@ async fn write_many_rows() { .copy_in("COPY foo (id, bar) FROM STDIN BINARY") .await .unwrap(); - let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]); - pin_mut!(writer); + let mut writer = pin!(BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT])); for i in 0..10_000i32 { writer .as_mut() - .write(&[&i, &format!("the value for {}", i)]) + .write(&[&i, &format!("the value for {i}")]) .await .unwrap(); } @@ -69,7 +68,7 @@ async fn write_many_rows() { .unwrap(); for (i, row) in rows.iter().enumerate() { assert_eq!(row.get::<_, i32>(0), i as i32); - assert_eq!(row.get::<_, &str>(1), format!("the value for {}", i)); + 
assert_eq!(row.get::<_, &str>(1), format!("the value for {i}")); } } @@ -86,8 +85,7 @@ async fn write_big_rows() { .copy_in("COPY foo (id, bar) FROM STDIN BINARY") .await .unwrap(); - let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::BYTEA]); - pin_mut!(writer); + let mut writer = pin!(BinaryCopyInWriter::new(sink, &[Type::INT4, Type::BYTEA])); for i in 0..2i32 { writer @@ -164,7 +162,7 @@ async fn read_many_rows() { for (i, row) in rows.iter().enumerate() { assert_eq!(row.get::(0), i as i32); - assert_eq!(row.get::<&str>(1), format!("the value for {}", i)); + assert_eq!(row.get::<&str>(1), format!("the value for {i}")); } } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 3debf4eba..b005d244c 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -2,12 +2,11 @@ use bytes::{Bytes, BytesMut}; use futures_channel::mpsc; -use futures_util::{ - future, join, pin_mut, stream, try_join, Future, FutureExt, SinkExt, StreamExt, TryStreamExt, -}; +use futures_util::{join, stream, try_join, FutureExt, SinkExt, StreamExt, TryStreamExt}; use pin_project_lite::pin_project; use std::fmt::Write; -use std::pin::Pin; +use std::future::{self, Future}; +use std::pin::{pin, Pin}; use std::task::{Context, Poll}; use std::time::Duration; use tokio::net::TcpStream; @@ -148,6 +147,12 @@ async fn scram_password_ok() { connect("user=scram_user password=password dbname=postgres").await; } +#[tokio::test] +async fn sync() { + let client = connect("user=postgres").await; + client.check_connection().await.unwrap(); +} + #[tokio::test] async fn pipelined_prepare() { let client = connect("user=postgres").await; @@ -590,8 +595,7 @@ async fn copy_in() { .into_iter() .map(Ok::<_, Error>), ); - let sink = client.copy_in("COPY foo FROM STDIN").await.unwrap(); - pin_mut!(sink); + let mut sink = pin!(client.copy_in("COPY foo FROM STDIN").await.unwrap()); sink.send_all(&mut stream).await.unwrap(); let rows = 
sink.finish().await.unwrap(); assert_eq!(rows, 2); @@ -625,11 +629,11 @@ async fn copy_in_large() { let a = Bytes::from_static(b"0\tname0\n"); let mut b = BytesMut::new(); for i in 1..5_000 { - writeln!(b, "{0}\tname{0}", i).unwrap(); + writeln!(b, "{i}\tname{i}").unwrap(); } let mut c = BytesMut::new(); for i in 5_000..10_000 { - writeln!(c, "{0}\tname{0}", i).unwrap(); + writeln!(c, "{i}\tname{i}").unwrap(); } let mut stream = stream::iter( vec![a, b.freeze(), c.freeze()] @@ -637,8 +641,7 @@ async fn copy_in_large() { .map(Ok::<_, Error>), ); - let sink = client.copy_in("COPY foo FROM STDIN").await.unwrap(); - pin_mut!(sink); + let mut sink = pin!(client.copy_in("COPY foo FROM STDIN").await.unwrap()); sink.send_all(&mut stream).await.unwrap(); let rows = sink.finish().await.unwrap(); assert_eq!(rows, 10_000); @@ -659,8 +662,7 @@ async fn copy_in_error() { .unwrap(); { - let sink = client.copy_in("COPY foo FROM STDIN").await.unwrap(); - pin_mut!(sink); + let mut sink = pin!(client.copy_in("COPY foo FROM STDIN").await.unwrap()); sink.send(Bytes::from_static(b"1\tsteven")).await.unwrap(); } @@ -705,7 +707,7 @@ async fn copy_out() { async fn notices() { let long_name = "x".repeat(65); let (client, mut connection) = - connect_raw(&format!("user=postgres application_name={}", long_name,)) + connect_raw(&format!("user=postgres application_name={long_name}",)) .await .unwrap(); @@ -929,7 +931,7 @@ async fn query_opt() { .unwrap() .unwrap(); client - .query_one("SELECT * FROM foo", &[]) + .query_opt("SELECT * FROM foo", &[]) .await .err() .unwrap(); diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index 04d422e27..68ce33bdf 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,8 +1,8 @@ use std::time::Duration; -use tokio_postgres::config::{Config, TargetSessionAttrs}; +use tokio_postgres::config::{Config, SslNegotiation, TargetSessionAttrs}; fn check(s: &str, config: &Config) { - 
assert_eq!(s.parse::().expect(s), *config, "`{}`", s); + assert_eq!(s.parse::().expect(s), *config, "`{s}`"); } #[test] @@ -42,6 +42,10 @@ fn settings() { .keepalives_idle(Duration::from_secs(30)) .target_session_attrs(TargetSessionAttrs::ReadOnly), ); + check( + "sslnegotiation=direct", + Config::new().ssl_negotiation(SslNegotiation::Direct), + ); } #[test] diff --git a/tokio-postgres/tests/test/types/bit_vec_07.rs b/tokio-postgres/tests/test/types/bit_vec_07.rs new file mode 100644 index 000000000..5ea4a00bb --- /dev/null +++ b/tokio-postgres/tests/test/types/bit_vec_07.rs @@ -0,0 +1,31 @@ +use bit_vec_07::BitVec; + +use crate::types::test_type; + +#[tokio::test] +async fn test_bit_params() { + let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); + bv.pop(); + bv.pop(); + test_type( + "BIT(14)", + &[(Some(bv), "B'01101001000001'"), (None, "NULL")], + ) + .await +} + +#[tokio::test] +async fn test_varbit_params() { + let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); + bv.pop(); + bv.pop(); + test_type( + "VARBIT", + &[ + (Some(bv), "B'01101001000001'"), + (Some(BitVec::from_bytes(&[])), "B''"), + (None, "NULL"), + ], + ) + .await +} diff --git a/tokio-postgres/tests/test/types/bit_vec_08.rs b/tokio-postgres/tests/test/types/bit_vec_08.rs new file mode 100644 index 000000000..f01de9b75 --- /dev/null +++ b/tokio-postgres/tests/test/types/bit_vec_08.rs @@ -0,0 +1,31 @@ +use bit_vec_08::BitVec; + +use crate::types::test_type; + +#[tokio::test] +async fn test_bit_params() { + let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); + bv.pop(); + bv.pop(); + test_type( + "BIT(14)", + &[(Some(bv), "B'01101001000001'"), (None, "NULL")], + ) + .await +} + +#[tokio::test] +async fn test_varbit_params() { + let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); + bv.pop(); + bv.pop(); + test_type( + "VARBIT", + &[ + (Some(bv), "B'01101001000001'"), + (Some(BitVec::from_bytes(&[])), "B''"), + (None, "NULL"), + ], + ) + .await +} diff --git 
a/tokio-postgres/tests/test/types/chrono_04.rs b/tokio-postgres/tests/test/types/chrono_04.rs index b010055ba..fe99c6754 100644 --- a/tokio-postgres/tests/test/types/chrono_04.rs +++ b/tokio-postgres/tests/test/types/chrono_04.rs @@ -1,4 +1,4 @@ -use chrono_04::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc}; +use chrono_04::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc}; use std::fmt; use tokio_postgres::types::{Date, FromSqlOwned, Timestamp}; use tokio_postgres::Client; @@ -53,18 +53,20 @@ async fn test_with_special_naive_date_time_params() { async fn test_date_time_params() { fn make_check(time: &str) -> (Option>, &str) { ( - Some(Utc.from_utc_datetime( - &NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap(), - )), + Some( + DateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f%#z'") + .unwrap() + .to_utc(), + ), time, ) } test_type( "TIMESTAMP WITH TIME ZONE", &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), (None, "NULL"), ], ) @@ -75,18 +77,20 @@ async fn test_date_time_params() { async fn test_with_special_date_time_params() { fn make_check(time: &str) -> (Timestamp>, &str) { ( - Timestamp::Value(Utc.from_utc_datetime( - &NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap(), - )), + Timestamp::Value( + DateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f%#z'") + .unwrap() + .to_utc(), + ), time, ) } test_type( "TIMESTAMP WITH TIME ZONE", &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), 
(Timestamp::PosInfinity, "'infinity'"), (Timestamp::NegInfinity, "'-infinity'"), ], @@ -162,15 +166,12 @@ async fn test_special_params_without_wrapper() { T: FromSqlOwned + fmt::Debug, { let err = client - .query_one(&*format!("SELECT {}::{}", val, sql_type), &[]) + .query_one(&*format!("SELECT {val}::{sql_type}"), &[]) .await .unwrap() .try_get::<_, T>(0) .unwrap_err(); - assert_eq!( - err.to_string(), - "error deserializing column 0: value too large to decode" - ); + assert_eq!(err.to_string(), "error deserializing column 0"); } let mut client = connect("user=postgres").await; diff --git a/tokio-postgres/tests/test/types/jiff_01.rs b/tokio-postgres/tests/test/types/jiff_01.rs new file mode 100644 index 000000000..b703036e1 --- /dev/null +++ b/tokio-postgres/tests/test/types/jiff_01.rs @@ -0,0 +1,169 @@ +use jiff_01::{ + civil::{Date as JiffDate, DateTime, Time}, + Timestamp as JiffTimestamp, +}; +use std::fmt; +use tokio_postgres::{ + types::{Date, FromSqlOwned, Timestamp}, + Client, +}; + +use crate::connect; +use crate::types::test_type; + +#[tokio::test] +async fn test_datetime_params() { + fn make_check(s: &str) -> (Option, &str) { + (Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_datetime_params() { + fn make_check(s: &str) -> (Timestamp, &str) { + (Timestamp::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_timestamp_params() { + fn make_check(s: &str) -> (Option, &str) { + 
(Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_timestamp_params() { + fn make_check(s: &str) -> (Timestamp, &str) { + (Timestamp::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_date_params() { + fn make_check(s: &str) -> (Option, &str) { + (Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_date_params() { + fn make_check(s: &str) -> (Date, &str) { + (Date::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (Date::PosInfinity, "'infinity'"), + (Date::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_time_params() { + fn make_check(s: &str) -> (Option