diff --git a/stl/inc/atomic b/stl/inc/atomic
index 8161a9981a..bf946b2620 100644
--- a/stl/inc/atomic
+++ b/stl/inc/atomic
@@ -315,7 +315,7 @@ struct _Storage_for {
 #if _CMPXCHG_MASK_OUT_PADDING_BITS
     explicit _Storage_for(_Form_mask_t) noexcept {
         _CSTD memset(_Storage, 0xff, sizeof(_Ty));
-        __builtin_zero_non_value_bits(_Ptr());
+        ::__builtin_zero_non_value_bits(_Ptr()); // TRANSITION, DevCom-10456452
     }
 #endif // _CMPXCHG_MASK_OUT_PADDING_BITS

@@ -499,13 +499,13 @@ void _Atomic_wait_direct(
     const _Atomic_storage<_Ty>* const _This, _Value_type _Expected_bytes, const memory_order _Order) noexcept {
     const auto _Storage_ptr = _STD addressof(_This->_Storage);
     for (;;) {
-        const _Value_type _Observed_bytes = _Atomic_reinterpret_as<_Value_type>(_This->load(_Order));
+        const _Value_type _Observed_bytes = _STD _Atomic_reinterpret_as<_Value_type>(_This->load(_Order));
         if (_Expected_bytes != _Observed_bytes) {
 #if _CMPXCHG_MASK_OUT_PADDING_BITS
             using _TVal = remove_reference_t<_Ty>;
             if constexpr (_Might_have_non_value_bits<_TVal>) {
                 _Storage_for<_TVal> _Mask{_Form_mask};
-                const _Value_type _Mask_val = _Atomic_reinterpret_as<_Value_type>(_Mask._Ref());
+                const _Value_type _Mask_val = _STD _Atomic_reinterpret_as<_Value_type>(_Mask._Ref());

                 if (((_Expected_bytes ^ _Observed_bytes) & _Mask_val) == 0) {
                     _Expected_bytes = _Observed_bytes;
@@ -517,7 +517,7 @@ void _Atomic_wait_direct(
             return;
         }

-        __std_atomic_wait_direct(_Storage_ptr, &_Expected_bytes, sizeof(_Value_type), __std_atomic_wait_no_timeout);
+        ::__std_atomic_wait_direct(_Storage_ptr, &_Expected_bytes, sizeof(_Value_type), __std_atomic_wait_no_timeout);
     }
 }
 #endif // _HAS_CXX20
@@ -659,7 +659,7 @@ struct _Atomic_storage {
         const auto _Expected_ptr = _STD addressof(_Expected);
         bool _Result;
 #if _CMPXCHG_MASK_OUT_PADDING_BITS
-        __builtin_zero_non_value_bits(_Expected_ptr);
+        ::__builtin_zero_non_value_bits(_Expected_ptr); // TRANSITION, DevCom-10456452
 #endif // _CMPXCHG_MASK_OUT_PADDING_BITS
         _Guard _Lock{_Spinlock};
 #if _CMPXCHG_MASK_OUT_PADDING_BITS
@@ -667,7 +667,7 @@ struct _Atomic_storage {
             _Storage_for<_TVal> _Local;
             const auto _Local_ptr = _Local._Ptr();
             _CSTD memcpy(_Local_ptr, _Storage_ptr, sizeof(_TVal));
-            __builtin_zero_non_value_bits(_Local_ptr);
+            ::__builtin_zero_non_value_bits(_Local_ptr); // TRANSITION, DevCom-10456452
             _Result = _CSTD memcmp(_Local_ptr, _Expected_ptr, sizeof(_TVal)) == 0;
         } else {
             _Result = _CSTD memcmp(_Storage_ptr, _Expected_ptr, sizeof(_TVal)) == 0;
@@ -698,8 +698,8 @@ struct _Atomic_storage {
                 _Storage_for<_TVal> _Local;
                 const auto _Local_ptr = _Local._Ptr();
                 _CSTD memcpy(_Local_ptr, _Storage_ptr, sizeof(_TVal));
-                __builtin_zero_non_value_bits(_Local_ptr);
-                __builtin_zero_non_value_bits(_Expected_ptr);
+                ::__builtin_zero_non_value_bits(_Local_ptr); // TRANSITION, DevCom-10456452
+                ::__builtin_zero_non_value_bits(_Expected_ptr); // TRANSITION, DevCom-10456452
                 if (_CSTD memcmp(_Local_ptr, _Expected_ptr, sizeof(_TVal)) == 0) {
                     // _Storage differs from _Expected only by padding; copy the padding from _Storage into
                     // _Expected
@@ -716,24 +716,24 @@ struct _Atomic_storage {
                 }
             } // unlock

-            __std_atomic_wait_indirect(_Storage_ptr, _Expected_ptr, sizeof(_TVal), &_Spinlock,
+            ::__std_atomic_wait_indirect(_Storage_ptr, _Expected_ptr, sizeof(_TVal), &_Spinlock,
                 &_Atomic_wait_compare_non_lock_free, __std_atomic_wait_no_timeout);
         }
     }

     void notify_one() noexcept {
-        __std_atomic_notify_one_indirect(_STD addressof(_Storage));
+        ::__std_atomic_notify_one_indirect(_STD addressof(_Storage));
     }

     void notify_all() noexcept {
-        __std_atomic_notify_all_indirect(_STD addressof(_Storage));
+        ::__std_atomic_notify_all_indirect(_STD addressof(_Storage));
     }
 #endif // _HAS_CXX20

 #if 1 // TRANSITION, ABI
 protected:
     void _Init_spinlock_for_ref() noexcept {
-        _Spinlock = __std_atomic_get_mutex(_STD addressof(_Storage));
+        _Spinlock = ::__std_atomic_get_mutex(_STD addressof(_Storage));
     }

 private:
@@ -762,14 +762,14 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics
     }

     void store(const _TVal _Value) noexcept { // store with sequential consistency
-        const auto _Mem = _Atomic_address_as<char>(_Storage);
-        const char _As_bytes = _Atomic_reinterpret_as<char>(_Value);
+        const auto _Mem = _STD _Atomic_address_as<char>(_Storage);
+        const char _As_bytes = _STD _Atomic_reinterpret_as<char>(_Value);
         _ATOMIC_STORE_SEQ_CST(8, _Mem, _As_bytes)
     }

     void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order
-        const auto _Mem = _Atomic_address_as<char>(_Storage);
-        const char _As_bytes = _Atomic_reinterpret_as<char>(_Value);
+        const auto _Mem = _STD _Atomic_address_as<char>(_Storage);
+        const char _As_bytes = _STD _Atomic_reinterpret_as<char>(_Value);
         switch (static_cast<unsigned int>(_Order)) {
             _ATOMIC_STORE_PREFIX(8, _Mem, _As_bytes)
         case _Atomic_memory_order_seq_cst:
@@ -780,7 +780,7 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics

     _NODISCARD _TVal load(
         const memory_order _Order = memory_order_seq_cst) const noexcept { // load with given memory order
-        const auto _Mem = _Atomic_address_as<char>(_Storage);
+        const auto _Mem = _STD _Atomic_address_as<char>(_Storage);
         char _As_bytes;
 #if _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
         _ATOMIC_LOAD_ARM64(_As_bytes, 8, _Mem, static_cast<unsigned int>(_Order))
@@ -795,23 +795,24 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics
         // exchange with given memory order
         char _As_bytes;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange8,
-            _Atomic_address_as<char>(_Storage), _Atomic_reinterpret_as<char>(_Value));
+            _STD _Atomic_address_as<char>(_Storage), _STD _Atomic_reinterpret_as<char>(_Value));
         return reinterpret_cast<_TVal&>(_As_bytes);
     }

     bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired,
         const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with given memory order
-        char _Expected_bytes = _Atomic_reinterpret_as<char>(_Expected); // read before atomic operation
+        char _Expected_bytes = _STD _Atomic_reinterpret_as<char>(_Expected); // read before atomic operation
         char _Prev_bytes;

 #if _CMPXCHG_MASK_OUT_PADDING_BITS
         if constexpr (_Might_have_non_value_bits<_TVal>) {
             _Storage_for<_TVal> _Mask{_Form_mask};
-            const char _Mask_val = _Atomic_reinterpret_as<char>(_Mask._Ref());
+            const char _Mask_val = _STD _Atomic_reinterpret_as<char>(_Mask._Ref());

             for (;;) {
                 _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange8,
-                    _Atomic_address_as<char>(_Storage), _Atomic_reinterpret_as<char>(_Desired), _Expected_bytes);
+                    _STD _Atomic_address_as<char>(_Storage), _STD _Atomic_reinterpret_as<char>(_Desired),
+                    _Expected_bytes);
                 if (_Prev_bytes == _Expected_bytes) {
                     return true;
                 }
@@ -825,7 +826,7 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics
         }
 #endif // _CMPXCHG_MASK_OUT_PADDING_BITS
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange8,
-            _Atomic_address_as<char>(_Storage), _Atomic_reinterpret_as<char>(_Desired), _Expected_bytes);
+            _STD _Atomic_address_as<char>(_Storage), _STD _Atomic_reinterpret_as<char>(_Desired), _Expected_bytes);
         if (_Prev_bytes == _Expected_bytes) {
             return true;
         }
@@ -836,15 +837,15 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics

 #if _HAS_CXX20
     void wait(const _TVal _Expected, const memory_order _Order = memory_order_seq_cst) const noexcept {
-        _Atomic_wait_direct(this, _Atomic_reinterpret_as<char>(_Expected), _Order);
+        _STD _Atomic_wait_direct(this, _STD _Atomic_reinterpret_as<char>(_Expected), _Order);
     }

     void notify_one() noexcept {
-        __std_atomic_notify_one_direct(_STD addressof(_Storage));
+        ::__std_atomic_notify_one_direct(_STD addressof(_Storage));
     }

     void notify_all() noexcept {
-        __std_atomic_notify_all_direct(_STD addressof(_Storage));
+        ::__std_atomic_notify_all_direct(_STD addressof(_Storage));
     }
 #endif // _HAS_CXX20

@@ -864,14 +865,14 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics
     }

     void store(const _TVal _Value) noexcept { // store with sequential consistency
-        const auto _Mem = _Atomic_address_as<short>(_Storage);
-        const short _As_bytes = _Atomic_reinterpret_as<short>(_Value);
+        const auto _Mem = _STD _Atomic_address_as<short>(_Storage);
+        const short _As_bytes = _STD _Atomic_reinterpret_as<short>(_Value);
         _ATOMIC_STORE_SEQ_CST(16, _Mem, _As_bytes)
     }

     void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order
-        const auto _Mem = _Atomic_address_as<short>(_Storage);
-        const short _As_bytes = _Atomic_reinterpret_as<short>(_Value);
+        const auto _Mem = _STD _Atomic_address_as<short>(_Storage);
+        const short _As_bytes = _STD _Atomic_reinterpret_as<short>(_Value);
         switch (static_cast<unsigned int>(_Order)) {
             _ATOMIC_STORE_PREFIX(16, _Mem, _As_bytes)
         case _Atomic_memory_order_seq_cst:
@@ -882,7 +883,7 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics

     _NODISCARD _TVal load(
         const memory_order _Order = memory_order_seq_cst) const noexcept { // load with given memory order
-        const auto _Mem = _Atomic_address_as<short>(_Storage);
+        const auto _Mem = _STD _Atomic_address_as<short>(_Storage);
         short _As_bytes;
 #if _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
         _ATOMIC_LOAD_ARM64(_As_bytes, 16, _Mem, static_cast<unsigned int>(_Order))
@@ -897,22 +898,23 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics
         // exchange with given memory order
         short _As_bytes;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange16,
-            _Atomic_address_as<short>(_Storage), _Atomic_reinterpret_as<short>(_Value));
+            _STD _Atomic_address_as<short>(_Storage), _STD _Atomic_reinterpret_as<short>(_Value));
         return reinterpret_cast<_TVal&>(_As_bytes);
     }

     bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired,
         const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with given memory order
-        short _Expected_bytes = _Atomic_reinterpret_as<short>(_Expected); // read before atomic operation
+        short _Expected_bytes = _STD _Atomic_reinterpret_as<short>(_Expected); // read before atomic operation
         short _Prev_bytes;
 #if _CMPXCHG_MASK_OUT_PADDING_BITS
         if constexpr (_Might_have_non_value_bits<_Ty>) {
             _Storage_for<_TVal> _Mask{_Form_mask};
-            const short _Mask_val = _Atomic_reinterpret_as<short>(_Mask._Ref());
+            const short _Mask_val = _STD _Atomic_reinterpret_as<short>(_Mask._Ref());

             for (;;) {
                 _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange16,
-                    _Atomic_address_as<short>(_Storage), _Atomic_reinterpret_as<short>(_Desired), _Expected_bytes);
+                    _STD _Atomic_address_as<short>(_Storage), _STD _Atomic_reinterpret_as<short>(_Desired),
+                    _Expected_bytes);
                 if (_Prev_bytes == _Expected_bytes) {
                     return true;
                 }
@@ -926,7 +928,7 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics
         }
 #endif // _CMPXCHG_MASK_OUT_PADDING_BITS
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Prev_bytes, _InterlockedCompareExchange16,
-            _Atomic_address_as<short>(_Storage), _Atomic_reinterpret_as<short>(_Desired), _Expected_bytes);
+            _STD _Atomic_address_as<short>(_Storage), _STD _Atomic_reinterpret_as<short>(_Desired), _Expected_bytes);
         if (_Prev_bytes == _Expected_bytes) {
             return true;
         }
@@ -937,15 +939,15 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics

 #if _HAS_CXX20
     void wait(const _TVal _Expected, const memory_order _Order = memory_order_seq_cst) const noexcept {
-        _Atomic_wait_direct(this, _Atomic_reinterpret_as<short>(_Expected), _Order);
+        _STD _Atomic_wait_direct(this, _STD _Atomic_reinterpret_as<short>(_Expected), _Order);
     }

     void notify_one() noexcept {
-        __std_atomic_notify_one_direct(_STD addressof(_Storage));
+        ::__std_atomic_notify_one_direct(_STD addressof(_Storage));
     }

     void notify_all() noexcept {
-        __std_atomic_notify_all_direct(_STD addressof(_Storage));
+        ::__std_atomic_notify_all_direct(_STD addressof(_Storage));
     }
 #endif // _HAS_CXX20

@@ -965,14 +967,14 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics
     }

     void store(const _TVal _Value) noexcept { // store with sequential consistency
-        const auto _Mem = _Atomic_address_as<int>(_Storage);
-        const int _As_bytes = _Atomic_reinterpret_as<int>(_Value);
+        const auto _Mem = _STD _Atomic_address_as<int>(_Storage);
+        const int _As_bytes = _STD _Atomic_reinterpret_as<int>(_Value);
         _ATOMIC_STORE_32_SEQ_CST(_Mem, _As_bytes)
     }

     void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order
-        const auto _Mem = _Atomic_address_as<int>(_Storage);
-        const int _As_bytes = _Atomic_reinterpret_as<int>(_Value);
+        const auto _Mem = _STD _Atomic_address_as<int>(_Storage);
+        const int _As_bytes = _STD _Atomic_reinterpret_as<int>(_Value);
         switch (static_cast<unsigned int>(_Order)) {
             _ATOMIC_STORE_PREFIX(32, _Mem, _As_bytes)
         case _Atomic_memory_order_seq_cst:
@@ -998,7 +1000,7 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics
         // exchange with given memory order
         long _As_bytes;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange,
-            _Atomic_address_as<long>(_Storage), _Atomic_reinterpret_as<long>(_Value));
+            _STD _Atomic_address_as<long>(_Storage), _STD _Atomic_reinterpret_as<long>(_Value));
         return reinterpret_cast<_TVal&>(_As_bytes);
     }

@@ -1039,15 +1041,15 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics

 #if _HAS_CXX20
     void wait(const _TVal _Expected, const memory_order _Order = memory_order_seq_cst) const noexcept {
-        _Atomic_wait_direct(this, _Atomic_reinterpret_as<long>(_Expected), _Order);
+        _STD _Atomic_wait_direct(this, _STD _Atomic_reinterpret_as<long>(_Expected), _Order);
     }

     void notify_one() noexcept {
-        __std_atomic_notify_one_direct(_STD addressof(_Storage));
+        ::__std_atomic_notify_one_direct(_STD addressof(_Storage));
     }

     void notify_all() noexcept {
-        __std_atomic_notify_all_direct(_STD addressof(_Storage));
+        ::__std_atomic_notify_all_direct(_STD addressof(_Storage));
     }
 #endif // _HAS_CXX20

@@ -1067,14 +1069,14 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics
     }

     void store(const _TVal _Value) noexcept { // store with sequential consistency
-        const auto _Mem = _Atomic_address_as<long long>(_Storage);
-        const long long _As_bytes = _Atomic_reinterpret_as<long long>(_Value);
+        const auto _Mem = _STD _Atomic_address_as<long long>(_Storage);
+        const long long _As_bytes = _STD _Atomic_reinterpret_as<long long>(_Value);
         _ATOMIC_STORE_64_SEQ_CST(_Mem, _As_bytes);
     }

     void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order
-        const auto _Mem = _Atomic_address_as<long long>(_Storage);
-        const long long _As_bytes = _Atomic_reinterpret_as<long long>(_Value);
+        const auto _Mem = _STD _Atomic_address_as<long long>(_Storage);
+        const long long _As_bytes = _STD _Atomic_reinterpret_as<long long>(_Value);
         switch (static_cast<unsigned int>(_Order)) {
             _ATOMIC_STORE_PREFIX(64, _Mem, _As_bytes)
         case _Atomic_memory_order_seq_cst:
@@ -1116,7 +1118,7 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics
         // exchange with given memory order
         long long _As_bytes;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _As_bytes, _InterlockedExchange64,
-            _Atomic_address_as<long long>(_Storage), _Atomic_reinterpret_as<long long>(_Value));
+            _STD _Atomic_address_as<long long>(_Storage), _STD _Atomic_reinterpret_as<long long>(_Value));
         return reinterpret_cast<_TVal&>(_As_bytes);
     }
 #endif // ^^^ !defined(_M_IX86) ^^^
@@ -1160,15 +1162,15 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics

 #if _HAS_CXX20
     void wait(const _TVal _Expected, const memory_order _Order = memory_order_seq_cst) const noexcept {
-        _Atomic_wait_direct(this, _Atomic_reinterpret_as<long long>(_Expected), _Order);
+        _STD _Atomic_wait_direct(this, _STD _Atomic_reinterpret_as<long long>(_Expected), _Order);
     }

     void notify_one() noexcept {
-        __std_atomic_notify_one_direct(_STD addressof(_Storage));
+        ::__std_atomic_notify_one_direct(_STD addressof(_Storage));
     }

     void notify_all() noexcept {
-        __std_atomic_notify_all_direct(_STD addressof(_Storage));
+        ::__std_atomic_notify_all_direct(_STD addressof(_Storage));
     }
 #endif // _HAS_CXX20

@@ -1196,7 +1198,7 @@ struct _Atomic_storage<_Ty&, 16> { // lock-free using 16-byte intrinsics
     }

     _NODISCARD _TVal load() const noexcept { // load with sequential consistency
-        long long* const _Storage_ptr = const_cast<long long*>(_Atomic_address_as<const long long>(_Storage));
+        long long* const _Storage_ptr = const_cast<long long*>(_STD _Atomic_address_as<const long long>(_Storage));
         _Int128 _Result{}; // atomic CAS 0 with 0
         (void) _STD_COMPARE_EXCHANGE_128(_Storage_ptr, 0, 0, &_Result._Low);
         return reinterpret_cast<_TVal&>(_Result);
@@ -1204,7 +1206,7 @@ struct _Atomic_storage<_Ty&, 16> { // lock-free using 16-byte intrinsics

     _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order
 #if defined(_M_ARM64) || defined(_M_ARM64EC)
-        long long* const _Storage_ptr = const_cast<long long*>(_Atomic_address_as<const long long>(_Storage));
+        long long* const _Storage_ptr = const_cast<long long*>(_STD _Atomic_address_as<const long long>(_Storage));
         _Int128 _Result{}; // atomic CAS 0 with 0
         switch (_Order) {
         case memory_order_relaxed:
@@ -1265,7 +1267,7 @@ struct _Atomic_storage<_Ty&, 16> { // lock-free using 16-byte intrinsics
             for (;;) {
 #if defined(_M_ARM64) || defined(_M_ARM64EC)
                 _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedCompareExchange128,
-                    _Atomic_address_as<long long>(_Storage), _Desired_bytes._High, _Desired_bytes._Low,
+                    _STD _Atomic_address_as<long long>(_Storage), _Desired_bytes._High, _Desired_bytes._Low,
                     &_Expected_temp._Low);
 #else // ^^^ _M_ARM64, _M_ARM64EC / _M_X64 vvv
                 (void) _Order;
@@ -1291,7 +1293,8 @@ struct _Atomic_storage<_Ty&, 16> { // lock-free using 16-byte intrinsics
 #endif // _CMPXCHG_MASK_OUT_PADDING_BITS
 #if defined(_M_ARM64) || defined(_M_ARM64EC)
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedCompareExchange128,
-            _Atomic_address_as<long long>(_Storage), _Desired_bytes._High, _Desired_bytes._Low, &_Expected_temp._Low);
+            _STD _Atomic_address_as<long long>(_Storage), _Desired_bytes._High, _Desired_bytes._Low,
+            &_Expected_temp._Low);
 #else // ^^^ _M_ARM64, _M_ARM64EC / _M_X64 vvv
         (void) _Order;
         _Result = _STD_COMPARE_EXCHANGE_128(
@@ -1330,17 +1333,17 @@ struct _Atomic_storage<_Ty&, 16> { // lock-free using 16-byte intrinsics
                 return;
             }

-            __std_atomic_wait_indirect(_Storage_ptr, _Expected_ptr, sizeof(_TVal), nullptr,
+            ::__std_atomic_wait_indirect(_Storage_ptr, _Expected_ptr, sizeof(_TVal), nullptr,
                 &_Atomic_wait_compare_16_bytes, __std_atomic_wait_no_timeout);
         }
     }

     void notify_one() noexcept {
-        __std_atomic_notify_one_indirect(_STD addressof(_Storage));
+        ::__std_atomic_notify_one_indirect(_STD addressof(_Storage));
     }

     void notify_all() noexcept {
-        __std_atomic_notify_all_indirect(_STD addressof(_Storage));
+        ::__std_atomic_notify_all_indirect(_STD addressof(_Storage));
     }
 #endif // _HAS_CXX20

@@ -1366,49 +1369,49 @@ struct _Atomic_integral<_Ty, 1> : _Atomic_storage<_Ty> { // atomic integral oper
     _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         char _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd8,
-            _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
+            _STD _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         char _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedAnd8,
-            _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
+            _STD _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         char _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedOr8,
-            _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
+            _STD _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         char _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedXor8,
-            _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
+            _STD _Atomic_address_as<char>(this->_Storage), static_cast<char>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal operator++(int) noexcept {
-        return static_cast<_TVal>(_InterlockedExchangeAdd8(_Atomic_address_as<char>(this->_Storage), 1));
+        return static_cast<_TVal>(_InterlockedExchangeAdd8(_STD _Atomic_address_as<char>(this->_Storage), 1));
     }

     _TVal operator++() noexcept {
         unsigned char _Before =
-            static_cast<unsigned char>(_InterlockedExchangeAdd8(_Atomic_address_as<char>(this->_Storage), 1));
+            static_cast<unsigned char>(_InterlockedExchangeAdd8(_STD _Atomic_address_as<char>(this->_Storage), 1));
         ++_Before;
         return static_cast<_TVal>(_Before);
     }

     _TVal operator--(int) noexcept {
-        return static_cast<_TVal>(_InterlockedExchangeAdd8(_Atomic_address_as<char>(this->_Storage), -1));
+        return static_cast<_TVal>(_InterlockedExchangeAdd8(_STD _Atomic_address_as<char>(this->_Storage), -1));
     }

     _TVal operator--() noexcept {
         unsigned char _Before =
-            static_cast<unsigned char>(_InterlockedExchangeAdd8(_Atomic_address_as<char>(this->_Storage), -1));
+            static_cast<unsigned char>(_InterlockedExchangeAdd8(_STD _Atomic_address_as<char>(this->_Storage), -1));
         --_Before;
         return static_cast<_TVal>(_Before);
     }
@@ -1424,51 +1427,51 @@ struct _Atomic_integral<_Ty, 2> : _Atomic_storage<_Ty> { // atomic integral oper
     _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         short _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd16,
-            _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
+            _STD _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         short _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedAnd16,
-            _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
+            _STD _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         short _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedOr16,
-            _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
+            _STD _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         short _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedXor16,
-            _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
+            _STD _Atomic_address_as<short>(this->_Storage), static_cast<short>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal operator++(int) noexcept {
         unsigned short _After =
-            static_cast<unsigned short>(_InterlockedIncrement16(_Atomic_address_as<short>(this->_Storage)));
+            static_cast<unsigned short>(_InterlockedIncrement16(_STD _Atomic_address_as<short>(this->_Storage)));
         --_After;
         return static_cast<_TVal>(_After);
     }

     _TVal operator++() noexcept {
-        return static_cast<_TVal>(_InterlockedIncrement16(_Atomic_address_as<short>(this->_Storage)));
+        return static_cast<_TVal>(_InterlockedIncrement16(_STD _Atomic_address_as<short>(this->_Storage)));
     }

     _TVal operator--(int) noexcept {
         unsigned short _After =
-            static_cast<unsigned short>(_InterlockedDecrement16(_Atomic_address_as<short>(this->_Storage)));
+            static_cast<unsigned short>(_InterlockedDecrement16(_STD _Atomic_address_as<short>(this->_Storage)));
         ++_After;
         return static_cast<_TVal>(_After);
     }

     _TVal operator--() noexcept {
-        return static_cast<_TVal>(_InterlockedDecrement16(_Atomic_address_as<short>(this->_Storage)));
+        return static_cast<_TVal>(_InterlockedDecrement16(_STD _Atomic_address_as<short>(this->_Storage)));
     }
 };

@@ -1482,51 +1485,51 @@ struct _Atomic_integral<_Ty, 4> : _Atomic_storage<_Ty> { // atomic integral oper
     _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         long _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd,
-            _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
+            _STD _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         long _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedAnd,
-            _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
+            _STD _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         long _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedOr,
-            _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
+            _STD _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         long _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedXor,
-            _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
+            _STD _Atomic_address_as<long>(this->_Storage), static_cast<long>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal operator++(int) noexcept {
         unsigned long _After =
-            static_cast<unsigned long>(_InterlockedIncrement(_Atomic_address_as<long>(this->_Storage)));
+            static_cast<unsigned long>(_InterlockedIncrement(_STD _Atomic_address_as<long>(this->_Storage)));
         --_After;
         return static_cast<_TVal>(_After);
     }

     _TVal operator++() noexcept {
-        return static_cast<_TVal>(_InterlockedIncrement(_Atomic_address_as<long>(this->_Storage)));
+        return static_cast<_TVal>(_InterlockedIncrement(_STD _Atomic_address_as<long>(this->_Storage)));
     }

     _TVal operator--(int) noexcept {
         unsigned long _After =
-            static_cast<unsigned long>(_InterlockedDecrement(_Atomic_address_as<long>(this->_Storage)));
+            static_cast<unsigned long>(_InterlockedDecrement(_STD _Atomic_address_as<long>(this->_Storage)));
         ++_After;
         return static_cast<_TVal>(_After);
     }

     _TVal operator--() noexcept {
-        return static_cast<_TVal>(_InterlockedDecrement(_Atomic_address_as<long>(this->_Storage)));
+        return static_cast<_TVal>(_InterlockedDecrement(_STD _Atomic_address_as<long>(this->_Storage)));
     }
 };

@@ -1594,51 +1597,51 @@ struct _Atomic_integral<_Ty, 8> : _Atomic_storage<_Ty> { // atomic integral oper
     _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         long long _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd64,
-            _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
+            _STD _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         long long _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedAnd64,
-            _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
+            _STD _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         long long _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedOr64,
-            _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
+            _STD _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept {
         long long _Result;
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedXor64,
-            _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
+            _STD _Atomic_address_as<long long>(this->_Storage), static_cast<long long>(_Operand));
         return static_cast<_TVal>(_Result);
     }

     _TVal operator++(int) noexcept {
-        unsigned long long _After =
-            static_cast<unsigned long long>(_InterlockedIncrement64(_Atomic_address_as<long long>(this->_Storage)));
+        unsigned long long _After = static_cast<unsigned long long>(
+            _InterlockedIncrement64(_STD _Atomic_address_as<long long>(this->_Storage)));
         --_After;
         return static_cast<_TVal>(_After);
     }

     _TVal operator++() noexcept {
-        return static_cast<_TVal>(_InterlockedIncrement64(_Atomic_address_as<long long>(this->_Storage)));
+        return static_cast<_TVal>(_InterlockedIncrement64(_STD _Atomic_address_as<long long>(this->_Storage)));
     }

     _TVal operator--(int) noexcept {
-        unsigned long long _After =
-            static_cast<unsigned long long>(_InterlockedDecrement64(_Atomic_address_as<long long>(this->_Storage)));
+        unsigned long long _After = static_cast<unsigned long long>(
+            _InterlockedDecrement64(_STD _Atomic_address_as<long long>(this->_Storage)));
         ++_After;
         return static_cast<_TVal>(_After);
     }

     _TVal operator--() noexcept {
-        return static_cast<_TVal>(_InterlockedDecrement64(_Atomic_address_as<long long>(this->_Storage)));
+        return static_cast<_TVal>(_InterlockedDecrement64(_STD _Atomic_address_as<long long>(this->_Storage)));
     }
 #endif // ^^^ !defined(_M_IX86) ^^^
 };
@@ -1983,10 +1986,10 @@ struct _Atomic_pointer : _Atomic_storage<_Ty> {
         ptrdiff_t _Result;
 #if defined(_M_IX86) || defined(_M_ARM)
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd,
-            _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
+            _STD _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
 #else // ^^^ 32 bits / 64 bits vvv
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd64,
-            _Atomic_address_as<long long>(this->_Storage), _Shift_bytes);
+            _STD _Atomic_address_as<long long>(this->_Storage), _Shift_bytes);
 #endif // ^^^ 64 bits ^^^
         return reinterpret_cast<_Ty>(_Result);
     }
@@ -2079,10 +2082,10 @@ struct _Atomic_pointer<_Ty&> : _Atomic_storage<_Ty&> {
         ptrdiff_t _Result;
 #if defined(_M_IX86) || defined(_M_ARM)
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd,
-            _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
+            _STD _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
 #else // ^^^ 32 bits / 64 bits vvv
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd64,
-            _Atomic_address_as<long long>(this->_Storage), _Shift_bytes);
+            _STD _Atomic_address_as<long long>(this->_Storage), _Shift_bytes);
 #endif // ^^^ 64 bits ^^^
         return reinterpret_cast<_Ty>(_Result);
     }
diff --git a/tests/std/tests/P0019R8_atomic_ref/test.cpp b/tests/std/tests/P0019R8_atomic_ref/test.cpp
index fa119be524..a5aee42c08 100644
--- a/tests/std/tests/P0019R8_atomic_ref/test.cpp
+++ b/tests/std/tests/P0019R8_atomic_ref/test.cpp
@@ -7,7 +7,9 @@
 #include
 #include
 #include
+#include <memory>
 #include
+#include <type_traits>
 #include

 struct bigint {
@@ -290,6 +292,87 @@ void test_gh_1497() {
     }
 }

+#ifndef _M_CEE // TRANSITION, VSO-1659496
+// GH-140: STL: We should _STD qualify _Ugly function calls to avoid ADL
+template <class T>
+struct holder {
+    T t;
+};
+
+struct incomplete;
+
+template <class T>
+struct tagged_trivial {
+    T t;
+};
+
+template <class T>
+void test_incomplete_associated_class() { // COMPILE-ONLY
+    T o{};
+    std::atomic_ref<T> a{o};
+
+    a = o;
+
+    (void) a.is_lock_free();
+    (void) a.load();
+    (void) a.load(std::memory_order_relaxed);
+    a.store(T{});
+    a.store(T{}, std::memory_order_relaxed);
+    (void) a.exchange(T{});
+    (void) a.exchange(T{}, std::memory_order_relaxed);
+    (void) a.compare_exchange_weak(o, T{});
+    (void) a.compare_exchange_weak(o, T{}, std::memory_order_relaxed);
+    (void) a.compare_exchange_weak(o, T{}, std::memory_order_relaxed, std::memory_order_relaxed);
+    (void) a.compare_exchange_strong(o, T{});
+    (void) a.compare_exchange_strong(o, T{}, std::memory_order_relaxed);
+    (void) a.compare_exchange_strong(o, T{}, std::memory_order_relaxed, std::memory_order_relaxed);
+    a.wait(T{});
+    a.wait(T{}, std::memory_order_relaxed);
+    a.notify_one();
+    a.notify_all();
+
+    if constexpr (std::is_pointer_v<T>) {
+        std::remove_pointer_t<T> pointee{};
+        a = std::addressof(pointee);
+
+        (void) a.operator+=(0); // a += 0 triggers ADL
+        (void) a.operator-=(0); // a -= 0 triggers ADL
+        (void) a.operator++(); // ++a triggers ADL
+        (void) a.operator--(); // --a triggers ADL
+        (void) a.operator++(0); // a++ triggers ADL
+        (void) a.operator--(0); // a-- triggers ADL
+        (void) a.fetch_add(0);
+        (void) a.fetch_add(0, std::memory_order_relaxed);
+        (void) a.fetch_sub(0);
+        (void) a.fetch_sub(0, std::memory_order_relaxed);
+    }
+}
+
+void test_incomplete_associated_class_all() { // COMPILE-ONLY
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+}
+#endif // !defined(_M_CEE)
+
 int main() {
     test_ops();
     test_ops();
diff --git a/tests/std/tests/P1135R6_atomic_wait/test.cpp b/tests/std/tests/P1135R6_atomic_wait/test.cpp
index 7b7e0ea1fd..e3284bd4ea 100644
--- a/tests/std/tests/P1135R6_atomic_wait/test.cpp
+++ b/tests/std/tests/P1135R6_atomic_wait/test.cpp
@@ -3,6 +3,59 @@

 #include "test_atomic_wait.hpp"

+#ifndef _M_CEE // TRANSITION, VSO-1659496
+// GH-140: STL: We should _STD qualify _Ugly function calls to avoid ADL
+template <class T>
+struct holder {
+    T t;
+};
+
+struct incomplete;
+
+template <class T>
+struct tagged_trivial {
+    T t;
+};
+
+template <class T>
+void test_incomplete_associated_class() { // COMPILE-ONLY
+    std::atomic<T> a;
+    a.wait(T{});
+    a.wait(T{}, std::memory_order_relaxed);
+    a.notify_one();
+    a.notify_all();
+
+    std::atomic_wait(std::addressof(a), T{});
+    std::atomic_wait_explicit(std::addressof(a), T{}, std::memory_order_relaxed);
+    std::atomic_notify_one(std::addressof(a));
+    std::atomic_notify_all(std::addressof(a));
+}
+
+void test_incomplete_associated_class_all() { // COMPILE-ONLY
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+    test_incomplete_associated_class>>();
+
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+    test_incomplete_associated_class>*>();
+}
+#endif // !defined(_M_CEE)
+
 int main() {
     assert(__std_atomic_set_api_level(__std_atomic_api_level::__has_wait_on_address) ==
            __std_atomic_api_level::__has_wait_on_address);