From dc1dc7070e73bae46633ea621f74d135508122f7 Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Fri, 20 Sep 2024 21:32:20 +0900
Subject: [PATCH] Invert llvm cfgs

---
 build.rs                        | 23 ++++++++----------
 src/imp/atomic128/aarch64.rs    | 42 ++++++++++++++++-----------------
 src/imp/atomic128/intrinsics.rs | 28 +++++++++++-----------
 src/imp/atomic128/mod.rs        |  2 +-
 src/imp/atomic128/powerpc64.rs  |  4 ++--
 src/imp/x86.rs                  |  4 ++--
 6 files changed, 50 insertions(+), 53 deletions(-)

diff --git a/build.rs b/build.rs
index 500f1eaf..7608a63c 100644
--- a/build.rs
+++ b/build.rs
@@ -53,7 +53,7 @@ fn main() {
     // Custom cfgs set by build script. Not public API.
     // grep -F 'cargo:rustc-cfg=' build.rs | grep -Ev '^ *//' | sed -E 's/^.*cargo:rustc-cfg=//; s/(=\\)?".*$//' | LC_ALL=C sort -u | tr '\n' ',' | sed -E 's/,$/\n/'
     println!(
-        "cargo:rustc-check-cfg=cfg(portable_atomic_disable_fiq,portable_atomic_force_amo,portable_atomic_ll_sc_rmw,portable_atomic_llvm_15,portable_atomic_llvm_16,portable_atomic_llvm_18,portable_atomic_new_atomic_intrinsics,portable_atomic_no_asm,portable_atomic_no_asm_maybe_uninit,portable_atomic_no_atomic_64,portable_atomic_no_atomic_cas,portable_atomic_no_atomic_load_store,portable_atomic_no_atomic_min_max,portable_atomic_no_cfg_target_has_atomic,portable_atomic_no_cmpxchg16b_intrinsic,portable_atomic_no_cmpxchg16b_target_feature,portable_atomic_no_const_raw_ptr_deref,portable_atomic_no_const_transmute,portable_atomic_no_core_unwind_safe,portable_atomic_no_diagnostic_namespace,portable_atomic_no_stronger_failure_ordering,portable_atomic_no_track_caller,portable_atomic_no_unsafe_op_in_unsafe_fn,portable_atomic_s_mode,portable_atomic_sanitize_thread,portable_atomic_target_feature,portable_atomic_unsafe_assume_single_core,portable_atomic_unstable_asm,portable_atomic_unstable_asm_experimental_arch,portable_atomic_unstable_cfg_target_has_atomic,portable_atomic_unstable_isa_attribute)"
+        "cargo:rustc-check-cfg=cfg(portable_atomic_disable_fiq,portable_atomic_force_amo,portable_atomic_ll_sc_rmw,portable_atomic_no_llvm_15,portable_atomic_no_llvm_16,portable_atomic_no_llvm_18,portable_atomic_new_atomic_intrinsics,portable_atomic_no_asm,portable_atomic_no_asm_maybe_uninit,portable_atomic_no_atomic_64,portable_atomic_no_atomic_cas,portable_atomic_no_atomic_load_store,portable_atomic_no_atomic_min_max,portable_atomic_no_cfg_target_has_atomic,portable_atomic_no_cmpxchg16b_intrinsic,portable_atomic_no_cmpxchg16b_target_feature,portable_atomic_no_const_raw_ptr_deref,portable_atomic_no_const_transmute,portable_atomic_no_core_unwind_safe,portable_atomic_no_diagnostic_namespace,portable_atomic_no_stronger_failure_ordering,portable_atomic_no_track_caller,portable_atomic_no_unsafe_op_in_unsafe_fn,portable_atomic_s_mode,portable_atomic_sanitize_thread,portable_atomic_target_feature,portable_atomic_unsafe_assume_single_core,portable_atomic_unstable_asm,portable_atomic_unstable_asm_experimental_arch,portable_atomic_unstable_cfg_target_has_atomic,portable_atomic_unstable_isa_attribute)"
     );
     // TODO: handle multi-line target_feature_fallback
     // grep -F 'target_feature_fallback("' build.rs | grep -Ev '^ *//' | sed -E 's/^.*target_feature_fallback\(//; s/",.*$/"/' | LC_ALL=C sort -u | tr '\n' ',' | sed -E 's/,$/\n/'
@@ -167,9 +167,16 @@ fn main() {
         println!("cargo:rustc-cfg=portable_atomic_no_atomic_load_store");
     }
 
-    if version.llvm >= 16 {
-        println!("cargo:rustc-cfg=portable_atomic_llvm_16");
+    if version.llvm < 18 {
+        println!("cargo:rustc-cfg=portable_atomic_no_llvm_18");
+        if version.llvm < 16 {
println!("cargo:rustc-cfg=portable_atomic_no_llvm_16"); + if version.llvm < 15 { + println!("cargo:rustc-cfg=portable_atomic_no_llvm_15"); + } + } } + if version.nightly { // `cfg(sanitize = "..")` is not stabilized. let sanitize = env::var("CARGO_CFG_SANITIZE").unwrap_or_default(); @@ -332,11 +339,6 @@ fn main() { target_feature_fallback("zaamo", false); } "powerpc64" => { - // For Miri and ThreadSanitizer. - if version.nightly && version.llvm >= 15 { - println!("cargo:rustc-cfg=portable_atomic_llvm_15"); - } - let target_endian = env::var("CARGO_CFG_TARGET_ENDIAN").expect("CARGO_CFG_TARGET_ENDIAN not set"); // powerpc64le is pwr8+ by default https://github.com/llvm/llvm-project/blob/llvmorg-19.1.0/llvm/lib/Target/PowerPC/PPC.td#L702 @@ -364,11 +366,6 @@ fn main() { target_feature_fallback("quadword-atomics", has_pwr8_features); } "s390x" => { - // For Miri and ThreadSanitizer. - if version.nightly && version.llvm >= 18 { - println!("cargo:rustc-cfg=portable_atomic_llvm_18"); - } - // https://github.com/llvm/llvm-project/blob/llvmorg-19.1.0/llvm/lib/Target/SystemZ/SystemZFeatures.td let mut arch9_features = false; // z196+ let mut arch13_features = false; // z15+ diff --git a/src/imp/atomic128/aarch64.rs b/src/imp/atomic128/aarch64.rs index 9eaebbe6..91eacf38 100644 --- a/src/imp/atomic128/aarch64.rs +++ b/src/imp/atomic128/aarch64.rs @@ -358,7 +358,7 @@ macro_rules! start_lse { ".arch_extension lse" }; } -#[cfg(portable_atomic_llvm_16)] +#[cfg(not(portable_atomic_no_llvm_16))] #[cfg(any( target_feature = "lse128", portable_atomic_target_feature = "lse128", @@ -372,7 +372,7 @@ macro_rules! start_lse128 { ".arch_extension lse128" }; } -#[cfg(portable_atomic_llvm_16)] +#[cfg(not(portable_atomic_no_llvm_16))] #[cfg(any( target_feature = "rcpc3", portable_atomic_target_feature = "rcpc3", @@ -420,7 +420,7 @@ macro_rules! atomic_rmw { } }; } -#[cfg(not(portable_atomic_llvm_16))] +#[cfg(portable_atomic_no_llvm_16)] #[cfg(any( target_feature = "lse128", portable_atomic_target_feature = "lse128", @@ -717,7 +717,7 @@ unsafe fn _atomic_load_ldiapp(src: *mut u128, order: Ordering) -> u128 { let (out_lo, out_hi); match order { Ordering::Acquire => { - #[cfg(portable_atomic_llvm_16)] + #[cfg(not(portable_atomic_no_llvm_16))] asm!( start_rcpc3!(), "ldiapp {out_lo}, {out_hi}, [{src}]", @@ -728,7 +728,7 @@ unsafe fn _atomic_load_ldiapp(src: *mut u128, order: Ordering) -> u128 { ); // LLVM supports FEAT_LRCPC3 instructions on LLVM 16+, so use .inst directive on old LLVM. // https://github.com/llvm/llvm-project/commit/a6aaa969f7caec58a994142f8d855861cf3a1463 - #[cfg(not(portable_atomic_llvm_16))] + #[cfg(portable_atomic_no_llvm_16)] asm!( // 0: d9411800 ldiapp x0, x1, [x0] ".inst 0xd9411800", @@ -739,7 +739,7 @@ unsafe fn _atomic_load_ldiapp(src: *mut u128, order: Ordering) -> u128 { ); } Ordering::SeqCst => { - #[cfg(portable_atomic_llvm_16)] + #[cfg(not(portable_atomic_no_llvm_16))] asm!( start_rcpc3!(), // ldar (or dmb ishld) is required to prevent reordering with preceding stlxp. @@ -754,7 +754,7 @@ unsafe fn _atomic_load_ldiapp(src: *mut u128, order: Ordering) -> u128 { ); // LLVM supports FEAT_LRCPC3 instructions on LLVM 16+, so use .inst directive on old LLVM. // https://github.com/llvm/llvm-project/commit/a6aaa969f7caec58a994142f8d855861cf3a1463 - #[cfg(not(portable_atomic_llvm_16))] + #[cfg(portable_atomic_no_llvm_16)] asm!( // ldar (or dmb ishld) is required to prevent reordering with preceding stlxp. // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108891 for details. 
@@ -1157,7 +1157,7 @@ unsafe fn _atomic_store_stilp(dst: *mut u128, val: u128, order: Ordering) {
         macro_rules! atomic_store {
             ($acquire:tt) => {{
                 let val = U128 { whole: val };
-                #[cfg(portable_atomic_llvm_16)]
+                #[cfg(not(portable_atomic_no_llvm_16))]
                 asm!(
                     start_rcpc3!(),
                     "stilp {val_lo}, {val_hi}, [{dst}]",
@@ -1169,7 +1169,7 @@ unsafe fn _atomic_store_stilp(dst: *mut u128, val: u128, order: Ordering) {
                 );
                 // LLVM supports FEAT_LRCPC3 instructions on LLVM 16+, so use .inst directive on old LLVM.
                 // https://github.com/llvm/llvm-project/commit/a6aaa969f7caec58a994142f8d855861cf3a1463
-                #[cfg(not(portable_atomic_llvm_16))]
+                #[cfg(portable_atomic_no_llvm_16)]
                 asm!(
                     // 0: d9031802 stilp x2, x3, [x0]
                     ".inst 0xd9031802",
@@ -1607,7 +1607,7 @@ unsafe fn _atomic_swap_swpp(dst: *mut u128, val: u128, order: Ordering) -> u128
     unsafe {
         let val = U128 { whole: val };
         let (prev_lo, prev_hi);
-        #[cfg(portable_atomic_llvm_16)]
+        #[cfg(not(portable_atomic_no_llvm_16))]
         macro_rules! swap {
             ($acquire:tt, $release:tt, $fence:tt) => {
                 asm!(
@@ -1621,11 +1621,11 @@ unsafe fn _atomic_swap_swpp(dst: *mut u128, val: u128, order: Ordering) -> u128
                 )
             };
         }
-        #[cfg(portable_atomic_llvm_16)]
+        #[cfg(not(portable_atomic_no_llvm_16))]
         atomic_rmw!(swap, order);
         // LLVM supports FEAT_LSE128 instructions on LLVM 16+, so use .inst directive on old LLVM.
         // https://github.com/llvm/llvm-project/commit/7fea6f2e0e606e5339c3359568f680eaf64aa306
-        #[cfg(not(portable_atomic_llvm_16))]
+        #[cfg(portable_atomic_no_llvm_16)]
         macro_rules! swap {
             ($order:tt, $fence:tt) => {
                 asm!(
@@ -1639,7 +1639,7 @@ unsafe fn _atomic_swap_swpp(dst: *mut u128, val: u128, order: Ordering) -> u128
                 )
             };
         }
-        #[cfg(not(portable_atomic_llvm_16))]
+        #[cfg(portable_atomic_no_llvm_16)]
         atomic_rmw_inst!(swap, order);
         U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
     }
@@ -2028,7 +2028,7 @@ unsafe fn atomic_and(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     unsafe {
         let val = U128 { whole: !val };
         let (prev_lo, prev_hi);
-        #[cfg(portable_atomic_llvm_16)]
+        #[cfg(not(portable_atomic_no_llvm_16))]
         macro_rules! clear {
             ($acquire:tt, $release:tt, $fence:tt) => {
                 asm!(
@@ -2042,11 +2042,11 @@ unsafe fn atomic_and(dst: *mut u128, val: u128, order: Ordering) -> u128 {
                 )
             };
         }
-        #[cfg(portable_atomic_llvm_16)]
+        #[cfg(not(portable_atomic_no_llvm_16))]
         atomic_rmw!(clear, order);
         // LLVM supports FEAT_LSE128 instructions on LLVM 16+, so use .inst directive on old LLVM.
         // https://github.com/llvm/llvm-project/commit/7fea6f2e0e606e5339c3359568f680eaf64aa306
-        #[cfg(not(portable_atomic_llvm_16))]
+        #[cfg(portable_atomic_no_llvm_16)]
         macro_rules! clear {
             ($order:tt, $fence:tt) => {
                 asm!(
@@ -2060,7 +2060,7 @@ unsafe fn atomic_and(dst: *mut u128, val: u128, order: Ordering) -> u128 {
                 )
             };
         }
-        #[cfg(not(portable_atomic_llvm_16))]
+        #[cfg(portable_atomic_no_llvm_16)]
         atomic_rmw_inst!(clear, order);
         U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
     }
@@ -2106,7 +2106,7 @@ unsafe fn atomic_or(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     unsafe {
         let val = U128 { whole: val };
         let (prev_lo, prev_hi);
-        #[cfg(portable_atomic_llvm_16)]
+        #[cfg(not(portable_atomic_no_llvm_16))]
         macro_rules! or {
             ($acquire:tt, $release:tt, $fence:tt) => {
                 asm!(
@@ -2120,11 +2120,11 @@ unsafe fn atomic_or(dst: *mut u128, val: u128, order: Ordering) -> u128 {
                 )
             };
         }
-        #[cfg(portable_atomic_llvm_16)]
+        #[cfg(not(portable_atomic_no_llvm_16))]
         atomic_rmw!(or, order);
         // LLVM supports FEAT_LSE128 instructions on LLVM 16+, so use .inst directive on old LLVM.
        // https://github.com/llvm/llvm-project/commit/7fea6f2e0e606e5339c3359568f680eaf64aa306
-        #[cfg(not(portable_atomic_llvm_16))]
+        #[cfg(portable_atomic_no_llvm_16)]
         macro_rules! or {
             ($order:tt, $fence:tt) => {
                 asm!(
@@ -2138,7 +2138,7 @@ unsafe fn atomic_or(dst: *mut u128, val: u128, order: Ordering) -> u128 {
                 )
             };
         }
-        #[cfg(not(portable_atomic_llvm_16))]
+        #[cfg(portable_atomic_no_llvm_16)]
         atomic_rmw_inst!(or, order);
         U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
     }
diff --git a/src/imp/atomic128/intrinsics.rs b/src/imp/atomic128/intrinsics.rs
index dc4448e3..6a6938b4 100644
--- a/src/imp/atomic128/intrinsics.rs
+++ b/src/imp/atomic128/intrinsics.rs
@@ -248,13 +248,13 @@ where
 
 // On x86_64, we use core::arch::x86_64::cmpxchg16b instead of core::intrinsics.
 // - On s390x, old LLVM (pre-18) generates libcalls for operations other than load/store/cmpxchg (see also module-level comment).
-#[cfg(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18))))]
+#[cfg(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18)))]
 atomic_rmw_by_atomic_update!();
 // On powerpc64, LLVM doesn't support 128-bit atomic min/max (see also module-level comment).
 #[cfg(target_arch = "powerpc64")]
 atomic_rmw_by_atomic_update!(cmp);
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_swap(dst: *mut u128, val: u128, order: Ordering) -> u128 {
@@ -271,7 +271,7 @@ unsafe fn atomic_swap(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     }
 }
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_add(dst: *mut u128, val: u128, order: Ordering) -> u128 {
@@ -288,7 +288,7 @@ unsafe fn atomic_add(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     }
 }
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_sub(dst: *mut u128, val: u128, order: Ordering) -> u128 {
@@ -305,7 +305,7 @@ unsafe fn atomic_sub(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     }
 }
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_and(dst: *mut u128, val: u128, order: Ordering) -> u128 {
@@ -322,7 +322,7 @@ unsafe fn atomic_and(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     }
 }
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_nand(dst: *mut u128, val: u128, order: Ordering) -> u128 {
@@ -339,7 +339,7 @@ unsafe fn atomic_nand(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     }
 }
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_or(dst: *mut u128, val: u128, order: Ordering) -> u128 {
@@ -356,7 +356,7 @@ unsafe fn atomic_or(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     }
 }
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_xor(dst: *mut u128, val: u128, order: Ordering) -> u128 {
@@ -376,7 +376,7 @@ unsafe fn atomic_xor(dst: *mut u128, val: u128, order: Ordering) -> u128 {
 #[cfg(not(any(
     target_arch = "x86_64",
     target_arch = "powerpc64",
-    all(target_arch = "s390x", not(portable_atomic_llvm_18)),
+    all(target_arch = "s390x", portable_atomic_no_llvm_18),
 )))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
@@ -398,7 +398,7 @@ unsafe fn atomic_max(dst: *mut u128, val: u128, order: Ordering) -> i128 {
 #[cfg(not(any(
     target_arch = "x86_64",
     target_arch = "powerpc64",
-    all(target_arch = "s390x", not(portable_atomic_llvm_18)),
+    all(target_arch = "s390x", portable_atomic_no_llvm_18),
 )))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
@@ -420,7 +420,7 @@ unsafe fn atomic_min(dst: *mut u128, val: u128, order: Ordering) -> i128 {
 #[cfg(not(any(
     target_arch = "x86_64",
     target_arch = "powerpc64",
-    all(target_arch = "s390x", not(portable_atomic_llvm_18)),
+    all(target_arch = "s390x", portable_atomic_no_llvm_18),
 )))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
@@ -441,7 +441,7 @@ unsafe fn atomic_umax(dst: *mut u128, val: u128, order: Ordering) -> u128 {
 #[cfg(not(any(
     target_arch = "x86_64",
     target_arch = "powerpc64",
-    all(target_arch = "s390x", not(portable_atomic_llvm_18)),
+    all(target_arch = "s390x", portable_atomic_no_llvm_18),
 )))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
@@ -459,7 +459,7 @@ unsafe fn atomic_umin(dst: *mut u128, val: u128, order: Ordering) -> u128 {
     }
 }
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_not(dst: *mut u128, order: Ordering) -> u128 {
@@ -467,7 +467,7 @@ unsafe fn atomic_not(dst: *mut u128, order: Ordering) -> u128 {
     unsafe { atomic_xor(dst, !0, order) }
 }
 
-#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", not(portable_atomic_llvm_18)))))]
+#[cfg(not(any(target_arch = "x86_64", all(target_arch = "s390x", portable_atomic_no_llvm_18))))]
 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
 unsafe fn atomic_neg(dst: *mut u128, order: Ordering) -> u128 {
diff --git a/src/imp/atomic128/mod.rs b/src/imp/atomic128/mod.rs
index 098eb869..873f7c73 100644
--- a/src/imp/atomic128/mod.rs
+++ b/src/imp/atomic128/mod.rs
@@ -51,7 +51,7 @@ pub(super) mod aarch64;
 ))]
 // Use intrinsics.rs on Miri and Sanitizer that do not support inline assembly.
 #[cfg_attr(
-    all(any(miri, portable_atomic_sanitize_thread), portable_atomic_llvm_15),
+    all(any(miri, portable_atomic_sanitize_thread), not(portable_atomic_no_llvm_15)),
     path = "intrinsics.rs"
 )]
 pub(super) mod powerpc64;
diff --git a/src/imp/atomic128/powerpc64.rs b/src/imp/atomic128/powerpc64.rs
index 0fa7af55..fb3c9f0b 100644
--- a/src/imp/atomic128/powerpc64.rs
+++ b/src/imp/atomic128/powerpc64.rs
@@ -732,14 +732,14 @@ unsafe fn atomic_not_pwr8(dst: *mut u128, order: Ordering) -> u128 {
     unsafe { atomic_xor_pwr8(dst, !0, order) }
 }
 
-#[cfg(portable_atomic_llvm_16)]
+#[cfg(not(portable_atomic_no_llvm_16))]
 atomic_rmw_ll_sc_2! {
     atomic_neg_pwr8 as atomic_neg, [out("xer") _,],
     "subfic %r9, %r7, 0",
     "subfze %r8, %r6",
 }
 // LLVM 15 miscompiles subfic.
-#[cfg(not(portable_atomic_llvm_16))]
+#[cfg(portable_atomic_no_llvm_16)]
 atomic_rmw_ll_sc_2! {
     atomic_neg_pwr8 as atomic_neg, [zero = in(reg) 0_u64, out("xer") _,],
     "subc %r9, {zero}, %r7",
diff --git a/src/imp/x86.rs b/src/imp/x86.rs
index b8526ba8..7705095e 100644
--- a/src/imp/x86.rs
+++ b/src/imp/x86.rs
@@ -127,9 +127,9 @@ macro_rules! atomic_bit_opts {
         // LLVM 16+ can generate `lock bt{s,r,c}` for both immediate and register bit offsets.
         // https://godbolt.org/z/TGhr5z4ds
         // So, use fetch_* based implementations on LLVM 16+, otherwise use asm based implementations.
-        #[cfg(portable_atomic_llvm_16)]
+        #[cfg(not(portable_atomic_no_llvm_16))]
         impl_default_bit_opts!($atomic_type, $int_type);
-        #[cfg(not(portable_atomic_llvm_16))]
+        #[cfg(portable_atomic_no_llvm_16)]
         impl $atomic_type {
             // `::BITS` requires Rust 1.53
             const BITS: u32 = (core::mem::size_of::<$int_type>() * 8) as u32;