From 50fdd465e9468ce4b67800a7d07cec5af7c88a71 Mon Sep 17 00:00:00 2001 From: Max Winkler Date: Thu, 5 Nov 2020 10:23:08 -0800 Subject: [PATCH] [EASTL 3.17.02] (#395) eastl::atomic - fix all the spelling mistakes in the doc - Added support for non-trivially default constructible types - Cleaned up comments and impl - improved 128-bit load code gen - fixed type pun to support non-trivially default constructible types - ensure msvc instrinics do not emit prefetch instructions EASTL: to_array implementation EASTL: fix for rbtree input iterator ctor moving elements from the source container --- include/EASTL/array.h | 53 +- include/EASTL/atomic.h | 164 +- .../EASTL/internal/atomic/arch/arm/arch_arm.h | 4 +- .../internal/atomic/arch/arm/arch_arm_load.h | 24 +- .../atomic/arch/arm/arch_arm_memory_barrier.h | 1 + .../EASTL/internal/atomic/arch/x86/arch_x86.h | 24 +- .../atomic/arch/x86/arch_x86_add_fetch.h | 34 +- .../atomic/arch/x86/arch_x86_and_fetch.h | 34 +- .../atomic/arch/x86/arch_x86_cmpxchg_strong.h | 26 +- .../atomic/arch/x86/arch_x86_exchange.h | 12 +- .../atomic/arch/x86/arch_x86_fetch_add.h | 32 +- .../atomic/arch/x86/arch_x86_fetch_and.h | 32 +- .../atomic/arch/x86/arch_x86_fetch_or.h | 32 +- .../atomic/arch/x86/arch_x86_fetch_sub.h | 32 +- .../atomic/arch/x86/arch_x86_fetch_xor.h | 32 +- .../internal/atomic/arch/x86/arch_x86_load.h | 55 +- .../atomic/arch/x86/arch_x86_or_fetch.h | 34 +- .../internal/atomic/arch/x86/arch_x86_store.h | 20 +- .../atomic/arch/x86/arch_x86_sub_fetch.h | 34 +- .../atomic/arch/x86/arch_x86_xor_fetch.h | 34 +- include/EASTL/internal/atomic/atomic.h | 113 +- .../EASTL/internal/atomic/atomic_asserts.h | 17 +- .../EASTL/internal/atomic/atomic_base_width.h | 61 +- include/EASTL/internal/atomic/atomic_casts.h | 27 +- include/EASTL/internal/atomic/atomic_flag.h | 4 +- .../EASTL/internal/atomic/atomic_integral.h | 12 +- include/EASTL/internal/atomic/atomic_macros.h | 8 +- .../atomic/atomic_macros/atomic_macros.h | 58 + .../atomic/atomic_macros/atomic_macros_base.h | 2 +- .../atomic_macros_memory_barrier.h | 2 +- .../atomic_macros/atomic_macros_or_fetch.h | 1 + .../EASTL/internal/atomic/atomic_pointer.h | 16 +- .../internal/atomic/atomic_size_aligned.h | 10 +- .../EASTL/internal/atomic/compiler/compiler.h | 4 + .../internal/atomic/compiler/compiler_load.h | 12 +- .../atomic/compiler/gcc/compiler_gcc.h | 27 +- .../compiler/gcc/compiler_gcc_barrier.h | 1 - .../atomic/compiler/msvc/compiler_msvc.h | 31 +- .../compiler/msvc/compiler_msvc_add_fetch.h | 2 +- .../compiler/msvc/compiler_msvc_and_fetch.h | 25 +- .../msvc/compiler_msvc_cmpxchg_strong.h | 25 +- .../compiler/msvc/compiler_msvc_exchange.h | 2 +- .../compiler/msvc/compiler_msvc_fetch_add.h | 2 +- .../compiler/msvc/compiler_msvc_fetch_and.h | 25 +- .../compiler/msvc/compiler_msvc_fetch_or.h | 25 +- .../compiler/msvc/compiler_msvc_fetch_sub.h | 2 +- .../compiler/msvc/compiler_msvc_fetch_xor.h | 25 +- .../compiler/msvc/compiler_msvc_or_fetch.h | 25 +- .../compiler/msvc/compiler_msvc_sub_fetch.h | 2 +- .../compiler/msvc/compiler_msvc_xor_fetch.h | 25 +- include/EASTL/internal/config.h | 8 +- include/EASTL/internal/red_black_tree.h | 2 +- test/source/TestArray.cpp | 111 +- test/source/TestAtomicAsm.cpp | 58 + test/source/TestAtomicBasic.cpp | 7811 +++++++++-------- test/source/TestSet.cpp | 68 + 56 files changed, 5048 insertions(+), 4314 deletions(-) diff --git a/include/EASTL/array.h b/include/EASTL/array.h index c871b0bd..590aa94b 100644 --- a/include/EASTL/array.h +++ b/include/EASTL/array.h @@ -43,9 +43,9 @@ 
namespace eastl /// Implements a templated array class as per the C++ standard TR1. /// This class allows you to use a built-in C style array like an STL vector. /// It does not let you change its size, as it is just like a C built-in array. - /// Our implementation here strives to remove function call nesting, as that + /// Our implementation here strives to remove function call nesting, as that /// makes it hard for us to profile debug builds due to function call overhead. - /// Note that this is intentionally a struct with public data, as per the + /// Note that this is intentionally a struct with public data, as per the /// C++ standard update proposal requirements. /// /// Example usage: @@ -75,9 +75,9 @@ namespace eastl count = N }; - // Note that the member data is intentionally public. - // This allows for aggregate initialization of the - // object (e.g. array<int, 4> a = { 0, 3, 2, 4 }; ) + // Note that the member data is intentionally public. + // This allows for aggregate initialization of the + // object (e.g. array<int, 4> a = { 0, 3, 2, 4 }; ) value_type mValue[N ? N : 1]; public: @@ -85,9 +85,9 @@ namespace eastl void fill(const value_type& value); - // Unlike the swap function for other containers, array::swap takes linear time, + // Unlike the swap function for other containers, array::swap takes linear time, // may exit via an exception, and does not cause iterators to become associated with the other container. - void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value); + void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value); EA_CPP14_CONSTEXPR iterator begin() EA_NOEXCEPT; EA_CPP14_CONSTEXPR const_iterator begin() const EA_NOEXCEPT; @@ -318,7 +318,7 @@ namespace eastl template <typename T, size_t N> - EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference + EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference array<T, N>::front() const { #if EASTL_ASSERT_ENABLED @@ -382,7 +382,7 @@ namespace eastl #endif EA_ANALYSIS_ASSUME(i < N); - return static_cast<const_reference>(mValue[i]); + return static_cast<const_reference>(mValue[i]); } @@ -479,6 +479,41 @@ namespace eastl } + /////////////////////////////////////////////////////////////////////// + // to_array + /////////////////////////////////////////////////////////////////////// + namespace internal + { + template <typename T, size_t N, size_t... I> + EA_CONSTEXPR auto to_array(T (&a)[N], index_sequence<I...>) + { + return eastl::array<eastl::remove_cv_t<T>, N>{{a[I]...}}; + } + + template <typename T, size_t N, size_t... I> + EA_CONSTEXPR auto to_array(T (&&a)[N], index_sequence<I...>) + { + return eastl::array<eastl::remove_cv_t<T>, N>{{eastl::move(a[I])...}}; + } + } + + template <typename T, size_t N> + EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&a)[N]) + { + static_assert(eastl::is_constructible_v<T, T&>, "element type T must be copy-initializable"); + static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed"); + return internal::to_array(a, eastl::make_index_sequence<N>{}); + } + + template <typename T, size_t N> + EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&&a)[N]) + { + static_assert(eastl::is_move_constructible_v<T>, "element type T must be move-constructible"); + static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed"); + return internal::to_array(eastl::move(a), eastl::make_index_sequence<N>{}); + } + + } // namespace eastl diff --git a/include/EASTL/atomic.h b/include/EASTL/atomic.h index 5072a166..f27a72dc 100644 --- a/include/EASTL/atomic.h +++ b/include/EASTL/atomic.h @@ -10,11 +10,12 @@ #pragma once #endif + ///////////////////////////////////////////////////////////////////////////////// // // Below is the documentation of the API of the
eastl::atomic library. // This includes class and free functions. -// Anything marked with a '+' infront of the name is an extension to the std API. +// Anything marked with a '+' in front of the name is an extension to the std API. // @@ -133,7 +134,7 @@ // : Returns the new updated value after the RMW operation. // : Memory is affected according to seq_cst ordering. // -// - T oprator+=/-=/&=/|=/^=(T) +// - T operator+=/-=/&=/|=/^=(T) // : Atomically adds, subtracts, bitwise and/or/xor the atomic object with T. // : Returns the new updated value after the operation. // : Memory is affected according to seq_cst ordering. @@ -251,32 +252,27 @@ // to solve static-init order fiasco, there are other solutions for that. // // 2. -// Description: Atomic template T must always be nothrow default constructible -// Reasoning : If stores are always noexcept then your constructor should not be -// doing anything crazy as well. -// -// 3. // Description: Atomics are always lock free // Reasoning : We don't want people to fall into performance traps where implicit locking // is done. If your user defined type is large enough to not support atomic // instructions then your user code should do the locking. // -// 4. +// 3. // Description: Atomic objects can not be volatile // Reasoning : Volatile objects do not make sense in the context of eastl::atomic. // Use the given memory orders to get the ordering you need. // Atomic objects have to become visible on the bus. See below for details. // -// 5. +// 4. // Description: Consume memory order is not supported // Reasoning : See below for the reasoning. // -// 6. +// 5. // Description: ATOMIC_INIT() macros and the ATOMIC_LOCK_FREE macros are not implemented // Reasoning : Use the is_lock_free() method instead of the macros. // ATOMIC_INIT() macros aren't needed since the default constructor value initializes. // -// 7. +// 6. // Description: compare_exchange failure memory order cannot be stronger than success memory order // Reasoning : Besides the argument that it ideologically does not make sense that a failure // of the atomic operation shouldn't have a stricter ordering guarantee than the @@ -288,7 +284,7 @@ // that versions of compilers that say they support C++17 do not properly adhere to this // new requirement in their intrinsics. Thus we will not support this. // -// 8. +// 7. // Description: All memory orders are distinct types instead of enum values // Reasoning : This will not affect how the API is used in user code. // It allows us to statically assert on invalid memory orders since they are compile-time types @@ -303,16 +299,16 @@ // // ******** DISCLAIMER ******** // -// This documentation is not meant to provide rigourous proofs on the memory models +// This documentation is not meant to provide rigorous proofs on the memory models // of specific architectures or the C++ memory model introduced in C++11. It is not // meant to provide formal mathematical definitions and logic that shows that a given // implementation adheres to the C++ memory model. This isn't meant to be some infallible // oracle on memory models, barriers, observers, and architecture implementation details. // What I do hope a reader gets out of this is the following. An understanding of the C++ // memory model and how that relates to implementations on various architectures. 
Various -// phenomona and ways that compilers and architectures can steer away from a sequentially +// phenomena and ways that compilers and architectures can steer away from a sequentially // consistent system. To provide examples on how to use this library with common patterns -// that will been seen in many code bases. Lastly I would like to provide insight and +// that will be seen in many code bases. Lastly I would like to provide insight and // further readings into the lesser known topics that aren't shared outside people // who live in this space and why certain things are done the way they are // such as cumulativity of memory barriers as one example. Sometimes specifying barriers @@ -335,7 +331,7 @@ // [3] Evaluating the Cost of Atomic Operations on Modern Architectures // [4] A Tutorial Introduction to the ARM and POWER Relaxed Memory Models // [5] Memory Barriers: a Hardware View for Software Hackers -// [6] Memory Model = Instruction Reordering + Store Atomcity +// [6] Memory Model = Instruction Reordering + Store Atomicity // [7] ArMOR: Defending Against Memory Consistency Model Mismatches in Heterogeneous Architectures // [8] Weak Memory Models: Balancing Definitional Simplicity and Implementation Flexibility // [9] Repairing Sequential Consistency in C/C++11 @@ -350,7 +346,7 @@ // // ******** What does it mean to be Atomic? ******** // -// The word atomic has been overloaded and can mean a lot of differnt things depending on the context, +// The word atomic has been overloaded and can mean a lot of different things depending on the context, // so let's digest it. // // The first attribute for something to be atomic is that concurrent stores and loads @@ -376,14 +372,14 @@ // on a 32-bit ARMv7 core. // // An operation may be considered atomic if multiple sub-operations are done as one -// transanctional unit. This is commonly known as a Read-Modify-Write, RMW, operation. +// transactional unit. This is commonly known as a Read-Modify-Write, RMW, operation. // Take a simple add operation; it is actually a load from memory into a register, // a modification of said register and then a store back to memory. If two threads // concurrently execute this add operation on the same memory location; any interleaving // of the 3 sub-operations is possible. It is possible that if the initial value is 0, // the result may be 1 because each thread executed in lockstep both loading 0, adding 1 // and then storing 1. A RMW operation may be considered atomic if the whole sequence of -// sub-operations are serialized as one transanctional unit. +// sub-operations are serialized as one transactional unit. // // Atomicity may also refer to the order in which memory operations are observed and the // dependencies between memory operations to different memory locations. As a quick example @@ -392,7 +388,7 @@ // the store to B, will we observe r1 == 2. Our intuition tells us that well A was stored // first and then B, so if I read the new value of B then I must also read the new value // of A since the store to A happened before B so if I can see B then I must be able to -// see everthing before B which includes A. +// see everything before B which includes A. // This highlights the ordering of memory operations and why memory barriers and memory // models are so heavily attached to atomic operations because one could classify something // is atomic if the dependency highlighted in the above example is allowed to be maintained. 
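//
// As a concrete illustration of the read-modify-write discussion above, here is a minimal sketch
// (not part of the original documentation) written against the eastl::atomic API; the counter and
// the two thread functions are hypothetical names used purely for illustration.
//
//     eastl::atomic<int> gCounter{0};
//
//     void thread0() { gCounter.fetch_add(1, eastl::memory_order_relaxed); }
//     void thread1() { gCounter.fetch_add(1, eastl::memory_order_relaxed); }
//
// Because fetch_add() performs the load, the add and the store as one transactional unit, the final
// value is always 2 once both threads have finished. With a plain int and "counter = counter + 1" on
// both threads, the interleaving described above makes a final value of 1 a perfectly legal outcome.
//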
@@ -430,7 +426,7 @@ // in various interconnects from the cpu to the memory itself. One key thing to note is that cpus // do not physically reorder the instruction stream. Instructions are dispatched and retired // in-order but executed out-of-order. Memory barriers will prevent these tricks from happening -// by controling the interaction of multiple cpus. +// by controlling the interaction of multiple cpus. // // Compilers will morph your code and physically move instructions around as long as the program // has the same observed behaviour. This is becoming increasingly true with more optimization techniques @@ -439,7 +435,7 @@ // This means the compiler does indeed alter the instruction stream // and compiler barriers are a way to tell them to not move any memory instructions across the barrier. // This does not prevent a compiler from doing optimizations such as constant folding, merging of -// overlapping loads, or even dead store elimination. Compiler barries are also very cheap and +// overlapping loads, or even dead store elimination. Compiler barriers are also very cheap and // have zero impact on anything that the compiler knows isn't visible in memory such as local variables // whose addresses do not escape the function even if their address is taken. You can think of it // in terms of a sequence point as used with "volatile" qualified variables to denote a place in code where @@ -512,16 +508,16 @@ // ******** Adding Caches ******** // // Caches by nature implicitly add the potential for memory reordering. A centralized shared snoopy bus that we all learned in school -// makes it easy to implement sequential consistency with caches. Writes and reads are all serialized in a total order via the cache bus transanction +// makes it easy to implement sequential consistency with caches. Writes and reads are all serialized in a total order via the cache bus transaction // ordering. Every modern day bus is not inorder, and most certainly not a shared centralized bus. Cache coherency guarantees that all memory operations // will be propagated eventually to all parties, but it doesn't guarantee in what order or in what time frame. Once you add -// caches, various levels of caching and various interconnects between remote cpus, you ineviatably run into the issue where -// some cpus observe the affects of a store before other cpus. Obviously we have weakly-ordered and strongly-ordered cpus with -// caches so why is that? The short answer is, where is the onous put, is it on the programmer or the hardware. Does the hardware +// caches, various levels of caching and various interconnects between remote cpus, you inevitably run into the issue where +// some cpus observe the effects of a store before other cpus. Obviously we have weakly-ordered and strongly-ordered cpus with +// caches so why is that? The short answer is, where is the onus put, is it on the programmer or the hardware. Does the hardware // have dependency tracking, is it able to determine when a memory order violation occurs such as rolling back its speculative execution // and also how far along the chain of interconnects does the hardware wait before it determines that the memory operation has // been acknowledged or is considered to satisfy its memory ordering guarantees. 
Again this is a very high level view of the system -// as a whole, but the take away is yes; caches do add the potential for reordering but other supporting hardware determines whether +// as a whole, but the takeaway is yes; caches do add the potential for reordering but other supporting hardware determines whether // that is observable by the programmer. There is also some debate whether weakly-ordered processors are actually more performant // than strongly-ordered cpus eluding to the fact that the hardware has a better picture of what is a violation versus the programmer // having to emit far more barriers on weakly-ordered architectures in multi-threaded code which may actually not be needed because the @@ -533,7 +529,7 @@ // Store buffers are simple fixed size structures that sit between the cpu and the memory hierarchy. This allows // each cpu to record its write in the store buffer and then move onto the next instruction. The store buffer will // eventually be flushed to the resulting memory hierarchy in FIFO order. How and when this flushing occurs is irrelevant to the -// understanding of a store buffer. A read from an adress will grab the most recent write to the same address in the store buffer. +// understanding of a store buffer. A read from an address will grab the most recent write to the same address in the store buffer. // // The introduction of a store buffer is our first dive into weaker memory consistency. The addition of this hardware turns the consistency model weaker, // into one that is commonly known as TSO, Total-Store Order. This is the exact model used by x86 cpus and we will see what this means @@ -598,10 +594,10 @@ // --------------------------- // // This STLD barrier effectively will flush the store buffer into the memory hierarchy ensuring all stores in the buffer are visible to all other cpus at the same time -// before executing the load instruction. Again nothing prevents a potenital hardware from speculatively executing the load even with the STLD barrier, the hardware will have to do +// before executing the load instruction. Again nothing prevents a potential hardware from speculatively executing the load even with the STLD barrier, the hardware will have to do // a proper rollback if it detected a memory order violation otherwise it can continue on with its speculative load. The barrier just delimits a stability point. // -// Most hardware does not provide granular barrier semenatics such as STLD. Most provide a write memory barrier which only orders stores, STST, a read memory barrier +// Most hardware does not provide granular barrier semantics such as STLD. Most provide a write memory barrier which only orders stores, STST, a read memory barrier // which only orders loads, LDLD, and then a full memory barrier which is all 4 permutations. So on x86 we will have to use the mfence, memory fence, instruction // which is a full memory barrier to get our desired STLD requirements. // @@ -611,11 +607,11 @@ // // Let's look at a non-FIFO store buffer now as seen in ARM cpus as an example and we will use a standard Message Passing example to see how it manifests in even weaker consistency. // A store buffer on ARM as an example allows write merging even with adjacent stores, is not a FIFO queue, any stores in the small hardware hash table may be ejected at any point -// due to a collision eviction or the availability of cache lines in the cache hierarchy meaning that stores may bypass the buffer entirely if that cacheline is already owned by that cpu. 
+// due to a collision eviction or the availability of cachelines in the cache hierarchy meaning that stores may bypass the buffer entirely if that cacheline is already owned by that cpu. // There is no guarantee that stores will be completed in order as in the FIFO case. // // --------------------------- -// Inital State: +// Initial State: // x = 0; y = 0; // --------------------------- // Thread 0 | Thread 1 @@ -632,14 +628,14 @@ // Let's see how this breaks with a non-FIFO store buffer. // // Thread 0 executes the STORE(x, 1) but the cacheline for x is not in thread 0's cache so we write to the store buffer and wait for the cacheline. -// Thread 1 executes the LOAD(y) and it also does not have y in its cacheline so it waits before completeing the load. +// Thread 1 executes the LOAD(y) and it also does not have y in its cacheline so it waits before completing the load. // Thread 0 moves on to STORE(y, 1). It owns this cacheline, hypothetically, so it may bypass the store buffer and store directly to the cache. -// Thread 0 receives message that Thread 1 needs y's cacheline, so it transfers the now modified cacheline to Thread 1. +// Thread 0 receives a message that Thread 1 needs y's cacheline, so it transfers the now modified cacheline to Thread 1. // Thread 1 completes the load with the updated value of y = 1 and branches out of the while loop since we saw the new value of y. // Thread 1 executes LOAD(x) which will return 0 since Thread 0 still hasn't flushed its store buffer waiting for x's cacheline. // Thread 0 receives x's cacheline and now flushes x = 1 to the cache. Thread 1 will also have invalidated its cacheline for x that it brought in via the previous load. // -// We have now fallen victim to STST reordering, allowing Thread 1 to observe a load of x returning 0. Not only does this store buffer allow STLD reording due to the nature of +// We have now fallen victim to STST reordering, allowing Thread 1 to observe a load of x returning 0. Not only does this store buffer allow STLD reordering due to the nature of // buffering stores, but it also allows another reordering; that of Store-Store reordering. It was observed as if Thread 0 executed STORE(y, 1) before STORE(x, 1) which completely // broke our simple message passing scenario. // @@ -663,12 +659,12 @@ // Due to the cache coherency protocol in play, a write to a cacheline will have to send invalidation messages to all other cpus that may have that cacheline as well. // Immediately executing and responding to invalidation messages can cause quite a stall especially if the cache is busy at the moment with other requests. // The longer we wait to invalidate the cacheline, the longer the remote cpu doing the write is stalled waiting on us. We don't like this very much. -// Invalidation Queues are just that, we queue up the action of actually invalidating the cache line but immediately respond to the request saying we did it anyway. +// Invalidation Queues are just that, we queue up the action of actually invalidating the cacheline but immediately respond to the request saying we did it anyway. // Now the remote cpu thinks we invalidated said cacheline but actually it may very well still be in our cache ready to be read from. We just got weaker again, let's // see how this manifests in code by starting from the end of our previous example. 
// // --------------------------- -// Inital State: +// Initial State: // x = 0; y = 0; // --------------------------- // Thread 0 | Thread 1 @@ -745,7 +741,7 @@ // STORE(&(y + r0 - r1), 1) | STORE(&(x + r1 - r1), 1) // ----------------------------------------------------- // -// Both fixes above ensure that both writes cannot be commited, made globally visible, until their program source code order preceeding reads have been fully satisfied. +// Both fixes above ensure that both writes cannot be committed, made globally visible, until their program source code order preceding reads have been fully satisfied. // // ******** Compiler Barriers ******** // @@ -753,7 +749,7 @@ // loads and stores from moving up above the compiler barrier. Here we will see the various ways our code may be subject // to compiler optimizations and why compiler barriers are needed. Note as stated above, compiler barriers may not // prevent all compiler optimizations or transformations. Compiler barriers are usually implemented by reloading all -// variables that currently cached in registers and flushing all stores in registers back to memory. +// variables that are currently cached in registers and flushing all stores in registers back to memory. // This list isn't exhaustive but will hopefully try to outline what compiler barriers protect against and what they don't. // // Compiler may reorder loads. @@ -769,14 +765,14 @@ // operations and STORE result into A; operations and STORE result int B; -> all operations; STORE result into B; STORE result into A; // // Insert a compiler barrier in between the two stores to guarantee that they are kept in order. -// It is not required the multiple stores to A before the barrier are not merged into one final store. +// It is not required that the multiple stores to A before the barrier are not merged into one final store. // It is not required that the store to B after the barrier be written to memory, it may be cached in a register for some indeterminate // amount of time as an example. // STORE(A, 1); COMPILER_BARRIER; STORE(B, 1); // // The compiler is allowed to merge overlapping loads and stores. // Inserting a compiler barrier here will not prevent the compiler from doing this optimization as doing one wider load/store is -// technically still abidding by the guarantee that the loads/stores are not reordered with each other. +// technically still abiding by the guarantee that the loads/stores are not reordered with each other. // LOAD A[0]; LOAD A[1]; -> A single wider LOAD instruction // STORE(A[0], 1); STORE(A[1], 2); -> A single wider STORE instruction // @@ -831,8 +827,8 @@ // STORE(A, 1); // // The compiler is well within its rights to omit the second store to A. Assuming we are doing some fancy lockfree communication -// with another cpu and the last store is meant to ensure the ending value is 1 even if another cpu changed A inbetween; that -// assumption will not be satisfied. A compiler barrier will not prevent the last store from be dead-store removed. +// with another cpu and the last store is meant to ensure the ending value is 1 even if another cpu changed A in between; that +// assumption will not be satisfied. A compiler barrier will not prevent the last store from being dead-store removed. // // STORE(A, 1); // OPERATIONS; @@ -891,7 +887,7 @@ // only conditional branches. The problem is compilers do not understand control dependencies, and control dependencies // are incredibly hard to understand. 
This is meant to make the reader aware they exist and to never use them // because they shouldn't be needed at all with eastl::atomic. Also control dependencies are categorized as LDLD or LDST, -// store control dependencies inheritly do not make sense since the conditional branch loads and compares two values. +// store control dependencies inherently do not make sense since the conditional branch loads and compares two values. // // A LDLD control dependency is an anti-pattern since it is not guaranteed that any architecture will detect the memory-order violation. // r0 = LOAD(A); @@ -901,7 +897,7 @@ // Given those sequence of instructions, it is entirely possible that a cpu attempts to speculatively predict and load the value of B // before the branch instruction has finished executing. It is entirely allowed that the cpu loads from B, assume B is in cache and A // is not in cache, before A. It is allowed, that even if the cpu was correct in it's prediction that it doesn't reload B and change the -// fact the it speculatively got lucky. +// fact that it speculatively got lucky. // // This is also what the x86 pause instruction inserted into spin wait loops is meant to solve. // LOOP: @@ -912,7 +908,7 @@ // x86 will catch a memory order violation if it sees that an external store was done to A and thus must flush the entire // pipeline of all the speculated load A. Pause instruction tells the cpu to not do speculative loads so that the pipeline is not // filled with all said speculative load instructions. This ensures we do not incur the costly pipeline flushes from memory order -// violations which is likely to occur in tight spin wait loops. This also allows other threads on the same physical core to use the +// violations which are likely to occur in tight spin wait loops. This also allows other threads on the same physical core to use the // core's resources better since our speculative nature won't be hogging it all. // // A LDST control dependency is a true dependency in which the cpu cannot make a store visible to the system and other cpus until it @@ -924,7 +920,7 @@ // // The fun part comes in with how does the compiler actually break all of this. // First is that if the compiler can ensure that the value of A in the LDST example is always not zero, then it is always within its -// righs to completely remove the if statement which would lend us with no control dependency. +// rights to completely remove the if statement which would lend us with no control dependency. // // Things get more fun when we deal with conditionals with else and else if statements where the compiler might be able to employ // invariant code motion optimizations. Take this example. @@ -947,7 +943,7 @@ // // Things can get even more complicated especially in C++ when values may come from constexpr, inline, inline constexpr, static const, etc, // variables and thus the compiler will do all sorts of transformations to reduce, remove, augment and change all your conditional code since -// it knows the values of the expressions or even parts of it at compile time. Even more agressive optimizations like LTO might break code that was being cautious. +// it knows the values of the expressions or even parts of it at compile time. Even more aggressive optimizations like LTO might break code that was being cautious. // Even adding simple short circuiting logic or your classic likely/unlikely macros can alter conditionals in ways you didn't expect. 
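//
// In practice, rather than relying on a conditional branch for ordering, the load itself should carry
// the required ordering. Below is a minimal sketch (not part of the original documentation) using the
// eastl::atomic API, assuming the producing thread writes the data and then sets gFlag with a release
// store; the flag and function names are hypothetical and purely for illustration.
//
//     eastl::atomic<int> gFlag{0};
//
//     int waitThenReadData(const int& data)
//     {
//         while (gFlag.load(eastl::memory_order_acquire) == 0)
//         {
//             // Spin. The acquire ordering on the load, not the conditional branch,
//             // is what keeps the later read of data from being satisfied too early.
//         }
//         return data;
//     }
//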
// In short know enough about control dependencies to know not to ever use them. // @@ -963,7 +959,7 @@ // Those are the above variations of Store Atomicity. Most processors have Non-Atomic Store Atomicity and thus you must program to that lowest common denominator. // We can use barriers, with some caveats, to restore Multi-Copy Store Atomicity to a Non-Atomic system though we need to define a new granular definition for // memory barriers to define this behaviour. Simple LDLD/LDST/STST/STLD definition is not enough to categorize memory barriers at this level. Let's start off -// with a simple example that breaks under a Non-Atomic Store Atomicity system and what potenital hardware features allow this behaviour to be observed. +// with a simple example that breaks under a Non-Atomic Store Atomicity system and what potential hardware features allow this behaviour to be observed. // // NOTE: For all the below examples we assume no compile reordering and that the processor also executes the instructions with no local reorderings to make the examples simpler, // to only show off the effects of Multi-Copy Store Atomicity. This is why we don't add any address dependencies, or mark explicit LDLD/LDST memory barriers. @@ -972,7 +968,7 @@ // --------------------------------------------------------------------------------------------------------- // Write-To-Read Causality, WRC, Litmus Test // --------------------------------------------------------------------------------------------------------- -// Inital State: +// Initial State: // X = 0; Y = 0; // --------------------------------------------------------------------------------------------------------- // Thread 0 | Thread 1 | Thread 2 @@ -984,7 +980,7 @@ // --------------------------------------------------------------------------------------------------------- // // Let's go over this example in detail and whether the outcome shown above can be observed. In this example Thread 0 stores 1 into X. If Thread 1 observes the write to X, -// it stores the observed value into Y. Thread 2 loads from Y then X. This means if the load from Y retuns 1, then we intuitively know the global store order +// it stores the observed value into Y. Thread 2 loads from Y then X. This means if the load from Y returns 1, then we intuitively know the global store order // was 1 to X and then 1 to Y. So is it possible then that the load from X in Thread 2 can return 0 in that case? Under a Multi-Copy Store Atomicity system, that would be // impossible because once 1 was stored to X all cpus see that store so if Thread 2 saw the store to Y which can only happen after the store to X was observed, then // Thread 2 must also have observed the store to X and return 1. As you may well have figured out, it is possible under a Non-Atomic Store Atomicity system to still @@ -1000,7 +996,7 @@ // has an SMT value of 2. Thread 0 will store 1 into X. This store may be in the store buffer or in the L1 cache that cpu 1 also shares with cpu 0, thus cpu 1 has early access to cpu 0's stores. // Thread 1 loads X which it observed as 1 early and then stores 1 into Y. Thread 2 may see the load from Y returning 1 but now the load from X returning 0 all because cpu 1 got early // access to cpu 0 store due to sharing a L1 cache or store buffer. -// We will come back on how to fix this example with the proper memory barries for the Non-Atomic Store Atomicity systems, but we need to detour first. 
+// We will come back on how to fix this example with the proper memory barriers for the Non-Atomic Store Atomicity systems, but we need to detour first. // // We need to take a deeper dive into memory barriers to understand how to restore Multi-Copy Store Atomicity from a Non-Atomic Store Atomicity system. // Let's start with a motivating example and we will be using the POWER architecture throughout this example because it encompasses all the possible observable behaviour. @@ -1021,14 +1017,14 @@ // IRIW : YES | IRIW : NO // // The TSO memory model provided by x86 seems to be exactly the same as POWER if we add lwsync memory barrier instructions in between each of the memory instructions. -// This provides us the exact same ordering guarantees as the TSO memory model. If we just looked at the 4 permuatations of reorderings we would be inclined to assume that -// TSO has the exact same ordering as sprinkling lwsync in our code inbetween every pair of memory instructions. That is not the case because memory barrier causality and cumulativity differ in subtle ways. +// This provides us the exact same ordering guarantees as the TSO memory model. If we just looked at the 4 permutations of reorderings we would be inclined to assume that +// TSO has the exact same ordering as sprinkling lwsync in our code in between every pair of memory instructions. That is not the case because memory barrier causality and cumulativity differ in subtle ways. // In this case they differ by the implicit guarantees from the TSO memory model versus those provided by the POWER lwsync memory barrier. // So the lwsync memory barrier prevents reordering with instructions that have causality but does not prevent reordering with instructions that are completely independent. // Let's dive into these concepts a bit more. // // Non-Atomic Store Atomicity architectures are prone to behaviours such as the non-causal outcome of the WRC test above. Architectures such as POWER defines memory barriers to enforce -// ordering with respect to memory accesses in remote cpus other than the cpu actually issuing the memory barrier. This is known a memory barrier cumulativity. +// ordering with respect to memory accesses in remote cpus other than the cpu actually issuing the memory barrier. This is known as memory barrier cumulativity. // How does the memory barrier issued on my cpu affect the view of memory accesses done by remote cpuss. // // Cumulative memory barriers are defined as follows - Take your time this part is very non-trivial: @@ -1064,13 +1060,13 @@ // WRC litmus test represents a scenario where only a A-Cumulative memory barrier is needed. The lwsync not only provides the needed local LDST memory barrier for the local thread but also ensures // that any write Thread 1 has read from before the memory barrier is kept in order with any write Thread 1 does after the memory barrier as far as any other thread observes. // In other words it ensures that any write that has propagated to Thread 1 before the memory barrier is propagated to any other thread before the second store after the memory barrier in Thread 1 -// can propagte to other threads in the system. This is exactly the definition of A-Cumulativity and what we need to ensure that causality is maintained in the WRC Litmus Test example. +// can propagate to other threads in the system. This is exactly the definition of A-Cumulativity and what we need to ensure that causality is maintained in the WRC Litmus Test example. 
// With that lwsync in place it is now impossible to observe r0 = 1 && r1 = 1 && r2 = 0. The lwsync has restored causal ordering. Let's look at an example that requires B-Cumulativity. // // --------------------------------------------------------------------------------------------------------- // Example 2 from POWER manual // --------------------------------------------------------------------------------------------------------- -// Inital State: +// Initial State: // X = 0; Y = 0; Z = 0 // --------------------------------------------------------------------------------------------------------- // Thread 0 | Thread 1 | Thread 2 @@ -1098,15 +1094,15 @@ // First the lwsync provides the needed local STST memory barrier for the local thread, thus the lwsync here ensures that the store to X propagates to Thread 1 before the store to Y. // B-Cumulativity applied to all operations after the memory barrier ensure that the store to X is // kept in order with respect to the store to Z as far as all other threads participating in the dependency chain are concerned. This is the exact definition of B-Cumulativity. -// With this one lwsync the outcome outlined above is impossble to observe. If r0 = 1 && r1 = 1 then r2 must be properly observed to be 1. +// With this one lwsync the outcome outlined above is impossible to observe. If r0 = 1 && r1 = 1 then r2 must be properly observed to be 1. // // We know that lwsync only provides A-Cumulativity and B-Cumulativity. Now we will look at examples that have no causality constraints thus we need to grab heavier memory barriers -// that ensure in short we will say makes a store visible to all processors even those not on the dependency chains. Let's get to the first example. +// that ensures in short we will say makes a store become visible to all processors, even those not on the dependency chains. Let's get to the first example. // // --------------------------------------------------------------------------------------------------------- // Independent Reads of Independent Writes, IRIW, coined by Doug Lea // --------------------------------------------------------------------------------------------------------- -// Inital State: +// Initial State: // X = 0; Y = 0; // --------------------------------------------------------------------------------------------------------- // Thread 0 | Thread 1 | Thread 2 | Thread 3 @@ -1134,7 +1130,7 @@ // To ensure that the above observation is forbidden we need to add a full sync memory barrier on both the reading threads. Think of sync as restoring sequential consistency. // The sync memory barrier ensures that any writes that Thread 1 has read from before the memory barrier are fully propagated to all threads before the reads are satisfied after the memory barrier. // The same can be said for Thread 3. This is why the sync memory barrier is needed because there is no partial causal ordering here or anything that can be considered for our A and B Cumulativity definitions. -// We must ensure that all writes have been propagated to all cpus before proceeding. This gives way to the difference between sync and lwsync with regards to visiblity of writes and cumulativity. +// We must ensure that all writes have been propagated to all cpus before proceeding. This gives way to the difference between sync and lwsync with regards to visibility of writes and cumulativity. 
// sync guarantees that all program-order previous stores must have been propagated to all other cpus before the memory instructions after the memory barrier. // lwsync does not ensure that stores before the memory barrier have actually propagated to any other cpu before memory instructions after the memory barrier, but it will keep stores before and after the // lwsync in order as far as other cpus are concerned that are within the dependency chain. @@ -1183,9 +1179,9 @@ // STORE_RELEASE(FLAG, 1) | r0 = LOAD(DATA) // ------------------------------------------------------ // -// This a common message passing idiom that also shows the use of Release-Acquire semantics. It should be obvious by the definitions outlined above why this works. +// This is a common message passing idiom that also shows the use of Release-Acquire semantics. It should be obvious by the definitions outlined above why this works. // An Acquire operation attached to a load needs to provide a LDLD and LDST memory barrier according to our definition of acquire. This is provided by default on x86 TSO thus no memory barrier is emitted. -// A Release operation attached to a store needs to provde a STST and LDST memory barrier according to our definition of release. This is provided by default on x86 TSO thus no memory barrier is emitted. +// A Release operation attached to a store needs to provide a STST and LDST memory barrier according to our definition of release. This is provided by default on x86 TSO thus no memory barrier is emitted. // // A couple of things of note here. One is that by attaching the semantics of a memory model directly to the memory instruction/operation itself we can take advantage of the fact the some processors // already provide guarantees between memory instructions and thus we do not have to emit memory barriers. Another thing of note is that the memory model is directly attached to the operation, @@ -1238,7 +1234,7 @@ // | STORE_RELEASE(Y, r0) | r2 = LOAD(X) // --------------------------------------------------------------------------------------------------------- // -// You may notice both of these examples from the previous section. We replaced the standalone POWER memory barrier instructions with Release-Acquire semantics attached directly to the operations where we want causality perserved. +// You may notice both of these examples from the previous section. We replaced the standalone POWER memory barrier instructions with Release-Acquire semantics attached directly to the operations where we want causality preserved. // We have transformed those examples to use the eastl::atomic memory model. // Take a moment to digest these examples in relation to the definition of Release-Acquire semantics. // @@ -1333,7 +1329,7 @@ // // The above shows a more elaborate example of how data dependent dependencies flow through RAW chains either through memory or through registers. // -// Notice by identify that this is a data dependent operation and asking for a consume ordering, we can completely eliminate the memory barrier on Thread 1 since we know ARMv7 does not reorder data dependent loads. Neat. +// Notice by identifying that this is a data dependent operation and asking for a consume ordering, we can completely eliminate the memory barrier on Thread 1 since we know ARMv7 does not reorder data dependent loads. Neat. 
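//
// For reference, here is a minimal sketch (not part of the original documentation) of a typical data
// dependent publication, publish a pointer then read through it, expressed with the eastl::atomic API.
// The Payload struct and the two functions are hypothetical names used purely for illustration; the
// reader uses an acquire load as the conservative, portable ordering since it is always at least as
// strong as a consume/dependency ordering.
//
//     struct Payload { int value; };
//
//     Payload gPayload;
//     eastl::atomic<Payload*> gPublished{nullptr};
//
//     void publisher()
//     {
//         gPayload.value = 42;                                         // write the data
//         gPublished.store(&gPayload, eastl::memory_order_release);    // then publish the pointer
//     }
//
//     int consumer()
//     {
//         Payload* p = gPublished.load(eastl::memory_order_acquire);   // read the published pointer
//         return p ? p->value : -1;                                    // a non-null p guarantees we observe 42
//     }
//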
// Unfortunately every major compiler upgrades a consume to an acquire ordering, because the consume ordering in the standard has a stronger guarantee and requires the compiler to do complicated dependency tracking. // Dependency chains in source code must be mapped to dependency chains at the machine instruction level until a std::kill_dependency in the source code. // @@ -1398,7 +1394,7 @@ // ******** Relaxed && eastl::atomic guarantees ******** // // We saw various ways that compiler barriers do not help us and that we need something more granular to make sure accesses are not mangled by the compiler to be considered atomic. -// Ensuring these guarantees like preventing dead-store elimination or the spliting of stores into smaller sub stores is where the C/C++11 +// Ensuring these guarantees like preventing dead-store elimination or the splitting of stores into smaller sub stores is where the C/C++11 // standard comes into play to define what it means to operate on an atomic object. // These basic guarantees are provided via new compiler intrinsics on gcc/clang that provide explicit indication to the compiler. // Or on msvc by casting the underlying atomic T to a volatile T*, providing stronger compiler guarantees than the standard requires. @@ -1406,7 +1402,7 @@ // reordered across sequence points. Again we are not using volatile here to guarantee atomicity, we are using it in its very intended purpose // to tell the compiler it cannot assume anything about the contents of that variable. Now let's dive into the base guarantees of eastl::atomic. // -// The standard defines the follow for all operations on an atomic object M. +// The standard defines the following for all operations on an atomic object M. // // Write-Write Coherence: // If an operation A modifies an atomic object M(store), happens before an operation B that modifies M(store), then A shall be earlier than B in the modification order of M. @@ -1423,7 +1419,7 @@ // If a side effect X on an atomic object M(store), happens before a value computation B on M(load), then the evaluation of B must take its value from X or from some side effect Y that follows X in the // modification order of M. // -// What does all this mean. This is just pedantic way of saying that the preceeding coherence requirements disallow compiler reordering of atomic operations to a single atomic object. +// What does all this mean. This is just a pedantic way of saying that the preceding coherence requirements disallow compiler reordering of atomic operations to a single atomic object. // This means all operations must be emitted by the compiler. Stores cannot be dead-store eliminated even if they are the only stores. // Loads cannot have common subexpression elimination performed on them even if they are the only loads. // Loads and Stores to the same atomic object cannot be reordered by the compiler. @@ -1433,8 +1429,8 @@ // // ******** Same Address LoadLoad Reordering ******** // -// It is expected that same address operations cannot and are not reordered with each other. It is expected that operations to the same address have sequetial consistency because -// they are to the same address. If you picture a cpu executing instructions, how is it possible to reorder instructions to the same address and yet kept program behaviour the same. +// It is expected that same address operations cannot and are not reordered with each other. It is expected that operations to the same address have sequential consistency because +// they are to the same address. 
If you picture a cpu executing instructions, how is it possible to reorder instructions to the same address and yet keep program behaviour the same. // Same Address LoadLoad Reordering is one weakening that is possible to do and keep observed program behaviour for a single-threaded program. // More formally, A and B are two memory instructions onto the same address P, where A is program ordered before B. If A and B are both loads then their order need not be ordered. // If B is a store then it cannot retire the store before A instruction completes. If A is a store and B is a load, then B must get its value forwarded from the store buffer or observe a later store @@ -1445,7 +1441,7 @@ // --------------------------- // Same Address LoadLoad // --------------------------- -// Inital State: +// Initial State: // x = 0; // --------------------------- // Thread 0 | Thread 1 @@ -1456,7 +1452,7 @@ // Observed: r0 = 1 && r0 = 0 // --------------------------- // -// Notice in the above example it has appeared as if the two loads from the same address have been reordered. If we first observed the new store of 1, then the next load should not observed a value in the past. +// Notice in the above example it has appeared as if the two loads from the same address have been reordered. If we first observed the new store of 1, then the next load should not observe a value in the past. // Many programmers, expect same address sequential consistency, all accesses to a single address appear to execute in a sequential order. // Notice this violates the Read-Read Coherence for all atomic objects defined by the std and thus provided by eastl::atomic. // @@ -1498,7 +1494,7 @@ // This can be used to add synchronization to a series of several relaxed atomic operations, as in the following trivial example. // // ---------------------------------------------------------------------------------------- -// Inital State: +// Initial State: // x = 0; // eastl::atomic y = 0; // z = 0; @@ -1518,12 +1514,12 @@ // ******** Atomic vs Standalone Fence ******** // // A sequentially consistent fence is stronger than a sequentially consistent operation because it is not tied to a specific atomic object. -// An atomic fence must provide sychronization with ANY atomic object where as the ordering on the atomic object itself must only provide +// An atomic fence must provide synchronization with ANY atomic object whereas the ordering on the atomic object itself must only provide // that ordering on that SAME atomic object. Thus this can provide cheaper guarantees on architectures with dependency tracking hardware. // Let's look at a concrete example that will make this all clear. // // ---------------------------------------------------------------------------------------- -// Inital State: +// Initial State: // eastl::atomic y = 0; // eastl::atomic z = 0; // ---------------------------------------------------------------------------------------- @@ -1540,7 +1536,7 @@ // In the above example if we observe r0 = 1 it is impossible to observe r1 = 0. // // ---------------------------------------------------------------------------------------- -// Inital State: +// Initial State: // eastl::atomic x = 0; // eastl::atomic y = 0; // eastl::atomic z = 0; @@ -1560,7 +1556,7 @@ // observing r1 = 0 even if we observe r0 = 1. For example the following code may fail. 
// // ---------------------------------------------------------------------------------------- -// Inital State: +// Initial State: // eastl::atomic x = 0; // eastl::atomic y = 0; // eastl::atomic z = 0; @@ -1575,7 +1571,7 @@ // ---------------------------------------------------------------------------------------- // // ---------------------------------------------------------------------------------------- -// Inital State: +// Initial State: // eastl::atomic x = 0; // eastl::atomic y = 0; // eastl::atomic z = 0; @@ -1589,7 +1585,7 @@ // Observed: r0 = 1 && r1 = 0 // ---------------------------------------------------------------------------------------- // -// In this example it is entirely possible that we observe r0 = 1 && r1 = 0 even though we have source code causility and sequentially consistent operations. +// In this example it is entirely possible that we observe r0 = 1 && r1 = 0 even though we have source code causality and sequentially consistent operations. // Observability is tied to the atomic object on which the operation was performed and the thread fence doesn't synchronize-with the fetch_add because there is no // there is no load above the fence that reads the value from the fetch_add. // @@ -1603,7 +1599,7 @@ // // All memory_order_seq_cst operations exhibit the below single total order in which all threads observe all modifications in the same order // -// Paraphrashing, there is a single total order on all memory_order_seq_cst operations, S, such that each sequentially consistent operation B that loads a value from +// Paraphrasing, there is a single total order on all memory_order_seq_cst operations, S, such that each sequentially consistent operation B that loads a value from // atomic object M observes either the result of the last sequentially consistent modification A on M, or some modification on M that isn't memory_order_seq_cst. // For atomic modifications A and B on an atomic object M, B occurs after A in the total order of M if: // there is a memory_order_seq_cst fence X whereby A is sequenced before X, and X precedes B, @@ -1659,7 +1655,7 @@ // Observed: r0 = 1 && r1 = 0 && r2 = 0 // ------------------------------------------------------------------------------------------------ // -// You'll notice this example is an inbetween example of the Store-Buffer and IRIW examples we have seen earlier. The store in Thread 0 needs to be sequentially consistent so it synchronizes with the +// You'll notice this example is an in between example of the Store-Buffer and IRIW examples we have seen earlier. The store in Thread 0 needs to be sequentially consistent so it synchronizes with the // thread fence in Thread 1. C++20 due to Reference [9], increased the strength of sequentially consistent fences has been increased to allow for the following. // // ------------------------------------------------------------------------------------------------ @@ -1683,8 +1679,8 @@ // // ******** False Sharing ******** // -// As we know operations work on the granularity of a cache line. A RMW operation obviously must have some help from the cache to ensure the entire operation -// is seen a one whole unit. Conceptually we can think of this as the cpu's cache taking a lock on the cacheline, the cpu doing the read-modify-write operation on the +// As we know operations work on the granularity of a cacheline. A RMW operation obviously must have some help from the cache to ensure the entire operation +// is seen as one whole unit. 
Conceptually we can think of this as the cpu's cache taking a lock on the cacheline, the cpu doing the read-modify-write operation on the // locked cacheline, and then releasing the lock on the cacheline. This means during that time any other cpu needing that cacheline must wait for the lock to be released. // // If we have two atomic objects doing RMW operations and they are within the same cacheline, they are unintentionally contending and serializing with each other even @@ -1718,7 +1714,7 @@ // If the value in memory is 0x22222233 then the first cmpxchg succeeded, then the second cmpxchg succeeded and finally our // byte to memory was stored, yet our load returned 0x11111133. This is because store buffer contents can be forwarded to overlapping loads. // It is possible that the byte store got put in the store buffer. Our load happened after the first cmpxchg with the byte forwarded. -// This behaviour is fine as along as your algorithm is able to cope with this kind of store buffer forwarding effects. +// This behaviour is fine as long as your algorithm is able to cope with this kind of store buffer forwarding effects. // // Reference [13] is a great read on more about this topic of mixed-size concurrency. // diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm.h b/include/EASTL/internal/atomic/arch/arm/arch_arm.h index 0349a42f..cc2ce522 100644 --- a/include/EASTL/internal/atomic/arch/arm/arch_arm.h +++ b/include/EASTL/internal/atomic/arch/arm/arch_arm.h @@ -53,12 +53,12 @@ * NOTE: * * On ARM32/64, we use the 'trailing sync;' convention with the stricter load acquire that uses - * a dmb instead of control dependencie + isb to ensure the IRIW litmus test is satisfied + * a dmb instead of a control dependency + isb to ensure the IRIW litmus test is satisfied * as one reason. See EASTL/atomic.h for futher explanation and deep-dive. * * For ARMv8 we could move to use the new proper store release and load acquire, RCsc variant. * All ARMv7 approaches work on ARMv8 and this code path is only used on msvc which isn't used - * heavily. Most of the ARM code will end up going thru clang or gcc since microsft arm devices + * heavily. Most of the ARM code will end up going thru clang or gcc since microsoft arm devices * aren't that abundant. 
*/ diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h index a6d0e421..e3b79b84 100644 --- a/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h +++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h @@ -29,8 +29,8 @@ */ #if defined(EA_PROCESSOR_ARM32) - #define EASTL_ARCH_ATOMIC_MSVC_ARM32_LDREXD(ret, ptr) \ - ret = __ldrexd(ptr) + #define EASTL_ARCH_ATOMIC_ARM32_LDREXD(ret, ptr) \ + ret = __ldrexd((ptr)) #endif @@ -60,7 +60,7 @@ #define EASTL_ARCH_ATOMIC_LOAD_64(type, ret, ptr) \ { \ __int64 loadRet64; \ - EASTL_ARCH_ATOMIC_MSVC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \ + EASTL_ARCH_ATOMIC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \ \ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadRet64); \ } @@ -75,6 +75,7 @@ /** * NOTE: + * * The ARM documentation states the following: * A 64-bit pair requires the address to be quadword aligned and is single-copy atomic for each doubleword at doubleword granularity * @@ -83,22 +84,13 @@ */ #define EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, MemoryOrder) \ { \ - struct BitfieldPun128 \ - { \ - __int64 value[2]; \ - }; \ - \ - struct BitfieldPun128 loadedPun = EASTL_ATOMIC_TYPE_PUN_CAST(struct BitfieldPun128, *(ptr)); \ - \ + bool cmpxchgRetBool; \ + ret = *(ptr); \ do \ { \ - bool cmpxchgRetBool; \ - EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(struct BitfieldPun128, cmpxchgRetBool, \ - EASTL_ATOMIC_TYPE_CAST(struct BitfieldPun128, (ptr)), \ - &loadedPun, loadedPun); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, \ + ptr, &(ret), ret); \ } while (!cmpxchgRetBool); \ - \ - ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadedPun); \ } diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h index 00af29ed..c52962eb 100644 --- a/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h +++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h @@ -34,6 +34,7 @@ /** * NOTE: + * * While it makes no sense for a hardware memory barrier to not imply a compiler barrier. * MSVC docs do not explicitly state that, so better to be safe than sorry chasing down * hard to find bugs due to the compiler deciding to reorder things. diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86.h b/include/EASTL/internal/atomic/arch/x86/arch_x86.h index 2c782cbc..5087c133 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86.h @@ -54,11 +54,14 @@ /** * NOTE: + * * On 32-bit x86 CPUs Intel Pentium and newer, AMD K5 and newer - * and any i686 class of x86 CPUs support only 64-bit cmpxchg + * and any i586 class of x86 CPUs support only 64-bit cmpxchg * known as cmpxchg8b. - * On these class of cpus we can guarantee that 64-bit loads are - * also atomic by using the SSE1/SSE2 movq instructions. + * + * On these class of cpus we can guarantee that 64-bit loads/stores are + * also atomic by using the SSE2 movq, SSE1 movlps, or x87 fild/fstp instructions. + * * We support all other atomic operations * on compilers that only provide this 64-bit cmpxchg instruction * by wrapping them around the 64-bit cmpxchg8b instruction. @@ -91,21 +94,26 @@ /** * NOTE: + * * 64-bit x64 CPUs support only 128-bit cmpxchg known as cmpxchg16b. 
+ * * We support all other atomic operations by wrapping them around * the 128-bit cmpxchg16b instruction. - * 128-bit loads are only atomic if using cmpxchg16b on x64. + * + * 128-bit loads are only atomic by using the cmpxchg16b instruction. + * SSE 128-bit loads are not guaranteed to be atomic even though some CPUs + * make them atomic such as AMD Ryzen or Intel SandyBridge. */ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \ - static_assert(false, "EASTL_ARCH_ATOMIC_X64_NOP_PRE_COMPUTE_DESIRED() must be implmented!"); + #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \ + static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implmented!"); - #define EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET(ret, prevObserved, val) + #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val) - #define EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \ + #define EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \ { \ bool cmpxchgRet; \ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h index 064f2c01..4534806d 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h @@ -57,37 +57,37 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) + (val)) - #define EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ ret = ((prevObserved) + (val)) #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - 
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h index 8c3c9327..c38ba414 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h @@ -57,37 +57,37 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) & (val)) - #define EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ ret = ((prevObserved) & (val)) #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + 
EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h index 8127ccef..e028398a 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h @@ -18,49 +18,49 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \ + #define EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \ { \ /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \ __asm__ __volatile__ ("lock; cmpxchg16b %2\n" /* cmpxchg16b sets/clears ZF */ \ "sete %3" /* If ZF == 1, set the return value to 1 */ \ /* Output Operands */ \ - : "=a"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))), "=d"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)) + 1)), \ + : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]), \ "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))), \ "=rm"((ret)) \ /* Input Operands */ \ - : "b"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))), "c"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)) + 1)), \ - "a"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))), "d"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)) + 1)) \ + : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[1]), \ + "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]) \ /* Clobbers */ \ : "memory", "cc"); \ } #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) 
\ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ - EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h index 85117a87..0f058004 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h @@ -54,7 +54,7 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \ + #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \ { \ bool cmpxchgRet; \ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \ @@ -70,19 +70,19 @@ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, RELAXED) + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELAXED) #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, ACQUIRE) + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQUIRE) #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, RELEASE) + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELEASE) #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, ACQ_REL) + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQ_REL) #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, SEQ_CST) + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, SEQ_CST) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h index e78c2697..d78b3334 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h @@ -54,34 +54,34 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) + (val)) #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \ - 
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h index 6b81b5c0..fd7dbb9c 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h @@ -54,34 +54,34 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) & (val)) #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, 
ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h index aa5bd710..50da6db7 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h @@ -54,34 +54,34 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) | (val)) #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h index 995011d9..77bee83b 100644 --- 
a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h @@ -54,34 +54,34 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) - (val)) #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h index d9126281..2e76b0c5 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h @@ -54,34 +54,34 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) ^ (val)) #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define 
EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h index 444897de..b0441903 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h @@ -33,7 +33,7 @@ #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \ { \ integralType retIntegral; \ - retIntegral = *(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \ + retIntegral = (*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)))); \ \ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ } @@ -41,15 +41,10 @@ #endif - #define EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, MemoryOrder) \ + #define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \ { \ - struct BitfieldPun128 \ - { \ - __int64 value[2]; \ - }; \ - \ - struct BitfieldPun128 expectedPun{0, 0}; \ - ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expectedPun); \ + EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected{0, 0}; \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \ \ bool cmpxchgRetBool; EA_UNUSED(cmpxchgRetBool); \ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, ptr, &(ret), ret); \ @@ -82,7 +77,7 @@ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr) #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ - EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, RELAXED) + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED) #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ @@ -101,9 +96,8 @@ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \ EASTL_ATOMIC_COMPILER_BARRIER() - #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ - EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, ACQUIRE); \ - EASTL_ATOMIC_COMPILER_BARRIER() + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + 
EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE) #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ @@ -122,9 +116,8 @@ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \ EASTL_ATOMIC_COMPILER_BARRIER() - #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ - EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, SEQ_CST); \ - EASTL_ATOMIC_COMPILER_BARRIER() + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST) #endif @@ -133,24 +126,40 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, MemoryOrder) \ + /** + * NOTE: + * + * Since the cmpxchg 128-bit inline assembly does a sete in the asm to set the return boolean, + * it doesn't get dead-store removed even though we don't care about the success of the + * cmpxchg since the compiler cannot reason about what is inside asm blocks. + * Thus this variant just does the minimum required to do an atomic load. + */ + #define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \ { \ - __uint128_t expected = 0; \ + EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected = 0; \ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \ \ - bool cmpxchgRetBool; EA_UNUSED(cmpxchgRetBool); \ - EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, ptr, &(ret), ret); \ + /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \ + __asm__ __volatile__ ("lock; cmpxchg16b %2" /* cmpxchg16b sets/clears ZF */ \ + /* Output Operands */ \ + : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \ + "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))) \ + /* Input Operands */ \ + : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \ + "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]) \ + /* Clobbers */ \ + : "memory", "cc"); \ } #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ - EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, RELAXED) + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED) #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ - EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, ACQUIRE) + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE) #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ - EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, SEQ_CST) + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h index 2007c66a..751cc2a3 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h @@ -57,37 +57,37 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) | (val)) - #define EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ ret = ((prevObserved) | (val)) #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128(type, ret, 
ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h index 39a0c047..397ff5f8 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h @@ -29,13 +29,13 @@ { \ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ \ - *(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))) = valIntegral; \ + (*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)))) = valIntegral; \ } #endif - #define EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, MemoryOrder) \ + #define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \ { \ type exchange128; EA_UNUSED(exchange128); \ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \ @@ -68,7 +68,7 @@ EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, RELAXED) + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELAXED) #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ @@ -88,7 +88,7 @@ EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, RELEASE) + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELEASE) #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ @@ -113,7 +113,7 @@ /** * 
NOTE: * - * Since 64-bit exchange is wrapped around a cmpxchg8b on 32-bit, it is + * Since 64-bit exchange is wrapped around a cmpxchg8b on 32-bit x86, it is * faster to just do a mov; mfence. */ #if defined(EA_PROCESSOR_X86) @@ -139,7 +139,7 @@ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, SEQ_CST) + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, SEQ_CST) #endif @@ -148,7 +148,7 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, MemoryOrder) \ + #define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \ { \ type exchange128; EA_UNUSED(exchange128); \ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \ @@ -156,13 +156,13 @@ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, RELAXED) + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELAXED) #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, RELEASE) + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELEASE) #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, SEQ_CST) + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, SEQ_CST) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h index c300816e..124b586d 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h @@ -57,37 +57,37 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) - (val)) - #define EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ ret = ((prevObserved) - (val)) #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) #define 
EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h index 37ac843f..28cb9587 100644 --- a/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h @@ -57,37 +57,37 @@ #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) - #define EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ ret = ((observed) ^ (val)) - #define EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ ret = ((prevObserved) ^ (val)) #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ - EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, 
\ - EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) #endif diff --git a/include/EASTL/internal/atomic/atomic.h b/include/EASTL/internal/atomic/atomic.h index 7684d6df..12486f84 100644 --- a/include/EASTL/internal/atomic/atomic.h +++ b/include/EASTL/internal/atomic/atomic.h @@ -12,6 +12,7 @@ #include +#include #include #include @@ -34,13 +35,15 @@ /** * NOTE: - * All of the actual implemention is done via the ATOMIC_MACROS in the compiler or arch sub folders. + * + * All of the actual implementation is done via the ATOMIC_MACROS in the compiler or arch sub folders. * The C++ code is merely boilerplate around these macros that actually implement the atomic operations. * The C++ boilerplate is also hidden behind macros. * This may seem more complicated but this is all meant to reduce copy-pasting and to ensure all operations * all end up going down to one macro that does the actual implementation. * The reduced code duplication makes it easier to verify the implementation and reason about it. * Ensures we do not have to re-implement the same code for compilers that do not support generic builtins such as MSVC. + * Ensures for compilers that have separate intrinsics for different widths, that C++ boilerplate isn't copy-pasted leading to programmer errors. * Ensures if we ever have to implement a new platform, only the low-level leaf macros have to be implemented, everything else will be generated for you. */ @@ -79,19 +82,10 @@ namespace internal }; - template - struct is_user_type_constrained - { - static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = - eastl::is_trivially_copyable::value && eastl::is_copy_constructible::value && eastl::is_move_constructible::value && - eastl::is_copy_assignable::value && eastl::is_move_assignable::value; - }; - - template struct is_user_type_suitable_for_primary_template { - static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = eastl::internal::is_atomic_lockfree_size::value && eastl::internal::is_user_type_constrained::value; + static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = eastl::internal::is_atomic_lockfree_size::value; }; @@ -128,8 +122,8 @@ namespace internal \ atomic(const atomic&) EA_NOEXCEPT = delete; \ \ - atomic& operator =(const atomic&) EA_NOEXCEPT = delete; \ - atomic& operator =(const atomic&) volatile EA_NOEXCEPT = delete; \ + atomic& operator=(const atomic&) EA_NOEXCEPT = delete; \ + atomic& operator=(const atomic&) volatile EA_NOEXCEPT = delete; \ \ public: /* ctors */ \ \ @@ -138,7 +132,7 @@ namespace internal { \ } \ \ - atomic() EA_NOEXCEPT = default; \ + atomic() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v) = default; \ \ public: \ \ @@ -157,57 +151,66 @@ namespace internal #define EASTL_ATOMIC_USING_ATOMIC_BASE(type) \ public: \ - using Base::operator=; \ - using Base::store; \ - using Base::load; \ - using Base::exchange; \ - using Base::compare_exchange_weak; \ - using Base::compare_exchange_strong; \ + \ + using Base::operator=; \ + using Base::store; \ + using Base::load; \ + using Base::exchange; \ + using Base::compare_exchange_weak; \ + using Base::compare_exchange_strong; \ \ public: \ \ - operator type() const volatile EA_NOEXCEPT \ - { \ - EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ - } \ + operator type() const volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } \ \ - operator 
type() const EA_NOEXCEPT \ - { \ - return load(eastl::memory_order_seq_cst); \ - } + operator type() const EA_NOEXCEPT \ + { \ + return load(eastl::memory_order_seq_cst); \ + } -#define EASTL_ATOMIC_USING_ATOMIC_INTEGRAL() \ - public: \ - using Base::fetch_add; \ - using Base::add_fetch; \ - using Base::fetch_sub; \ - using Base::sub_fetch; \ - using Base::fetch_and; \ - using Base::and_fetch; \ - using Base::fetch_or; \ - using Base::or_fetch; \ - using Base::fetch_xor; \ - using Base::xor_fetch; \ - using Base::operator++; \ - using Base::operator--; \ - using Base::operator+=; \ - using Base::operator-=; \ - using Base::operator&=; \ - using Base::operator|=; \ - using Base::operator^=; +#define EASTL_ATOMIC_USING_ATOMIC_INTEGRAL() \ + public: \ + \ + using Base::fetch_add; \ + using Base::add_fetch; \ + \ + using Base::fetch_sub; \ + using Base::sub_fetch; \ + \ + using Base::fetch_and; \ + using Base::and_fetch; \ + \ + using Base::fetch_or; \ + using Base::or_fetch; \ + \ + using Base::fetch_xor; \ + using Base::xor_fetch; \ + \ + using Base::operator++; \ + using Base::operator--; \ + using Base::operator+=; \ + using Base::operator-=; \ + using Base::operator&=; \ + using Base::operator|=; \ + using Base::operator^=; #define EASTL_ATOMIC_USING_ATOMIC_POINTER() \ public: \ - using Base::fetch_add; \ - using Base::add_fetch; \ - using Base::fetch_sub; \ - using Base::sub_fetch; \ - using Base::operator++; \ - using Base::operator--; \ - using Base::operator+=; \ - using Base::operator-=; + \ + using Base::fetch_add; \ + using Base::add_fetch; \ + using Base::fetch_sub; \ + using Base::sub_fetch; \ + \ + using Base::operator++; \ + using Base::operator--; \ + using Base::operator+=; \ + using Base::operator-=; template diff --git a/include/EASTL/internal/atomic/atomic_asserts.h b/include/EASTL/internal/atomic/atomic_asserts.h index c561ccb0..9324a479 100644 --- a/include/EASTL/internal/atomic/atomic_asserts.h +++ b/include/EASTL/internal/atomic/atomic_asserts.h @@ -18,17 +18,19 @@ static_assert(!eastl::is_same::value, "eastl::atomic : invalid memory order for the given operation!"); #define EASTL_ATOMIC_STATIC_ASSERT_TYPE(type) \ + /* User Provided T must not be cv qualified */ \ static_assert(!eastl::is_const::value, "eastl::atomic : Template Typename T cannot be const!"); \ static_assert(!eastl::is_volatile::value, "eastl::atomic : Template Typename T cannot be volatile! 
Use the memory orders to access the underlying type for the guarantees you need."); \ - static_assert(eastl::is_trivially_destructible::value, "eastl::atomic : Must be trivially destructible!"); \ + /* T must satisfy StandardLayoutType */ \ static_assert(eastl::is_standard_layout::value, "eastl::atomic : Must have standard layout!"); \ + /* T must be TriviallyCopyable but it does not have to be TriviallyConstructible */ \ + static_assert(eastl::is_trivially_copyable::value, "eastl::atomic : Template Typename T must be trivially copyable!"); \ static_assert(eastl::is_copy_constructible::value, "eastl::atomic : Template Typename T must be copy constructible!"); \ static_assert(eastl::is_move_constructible::value, "eastl::atomic : Template Typename T must be move constructible!"); \ static_assert(eastl::is_copy_assignable::value, "eastl::atomic : Template Typename T must be copy assignable!"); \ static_assert(eastl::is_move_assignable::value, "eastl::atomic : Template Typename T must be move assignable!"); \ - static_assert(eastl::internal::is_atomic_lockfree_size::value, "eastl::atomic : Template Typename T must be a lockfree size!"); \ - static_assert(eastl::is_trivially_copyable::value, "eastl::atomci : Template Typename T must be trivially copyable!"); \ - static_assert(eastl::is_nothrow_default_constructible::value, "eastl::atomic : Template Typename T must be nothrow default constructible!"); + static_assert(eastl::is_trivially_destructible::value, "eastl::atomic : Must be trivially destructible!"); \ + static_assert(eastl::internal::is_atomic_lockfree_size::value, "eastl::atomic : Template Typename T must be a lockfree size!"); #define EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(type) \ static_assert(eastl::is_object::value, "eastl::atomic : Template Typename T must be an object type!"); @@ -50,14 +52,17 @@ namespace internal struct atomic_invalid_type { /** - * class Test { int i; int j; int k; }; sizeof(Test2) == 96 bits + * class Test { int i; int j; int k; }; sizeof(Test) == 96 bits * * std::atomic allows non-primitive types to be used for the template type. * This causes the api to degrade to locking for types that cannot fit into the lockfree size * of the target platform such as std::atomic leading to performance traps. + * + * If this static_assert() fires, it means your template type T is larger than any atomic instruction + * supported on the given platform. */ - static_assert(!eastl::is_same::value, "eastl::atomic invalid template type T!"); + static_assert(!eastl::is_same::value, "eastl::atomic : invalid template type T!"); }; diff --git a/include/EASTL/internal/atomic/atomic_base_width.h b/include/EASTL/internal/atomic/atomic_base_width.h index d88d6b85..3b32e561 100644 --- a/include/EASTL/internal/atomic/atomic_base_width.h +++ b/include/EASTL/internal/atomic/atomic_base_width.h @@ -25,27 +25,58 @@ namespace internal template struct atomic_base_width; + /** + * NOTE: + * + * T does not have to be trivially default constructible but it still + * has to be a trivially copyable type for the primary atomic template. + * Thus we must type pun into whatever storage type of the given fixed width + * the platform designates. This ensures T does not have to be trivially constructible.
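 *
 * As a rough illustration with a hypothetical user type (not taken from this codebase):
 *
 *   struct Point { float x, y; Point(float px, float py) : x(px), y(py) {} };
 *
 * Point has no default constructor at all, but it is trivially copyable, standard layout and
 * of lock-free size, so eastl::atomic<Point> is accepted:
 *
 *   eastl::atomic<Point> p{ Point{1.0f, 2.0f} };
 *   Point observed = p.load(eastl::memory_order_relaxed);
 *
 * Internally the load is performed on the 64-bit fixed width storage type and the result is
 * type punned back into a Point.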
+ */ + +#define EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) \ + EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_FIXED_WIDTH_TYPE_, bits) + #define EASTL_ATOMIC_STORE_FUNC_IMPL(op, bits) \ - EA_PREPROCESSOR_JOIN(op, bits)(T, this->GetAtomicAddress(), desired); + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \ + EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \ + fixedWidthDesired) + + +#define EASTL_ATOMIC_LOAD_FUNC_IMPL(op, bits) \ + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) retVal; \ + EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \ + retVal, \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress())); \ + return EASTL_ATOMIC_TYPE_PUN_CAST(T, retVal); -#define EASTL_ATOMIC_LOAD_FUNC_IMPL(op, bits) \ - T retVal; \ - EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress()); \ - return retVal; #define EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(op, bits) \ - T retVal; \ - EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), desired); \ - return retVal; + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) retVal; \ + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \ + EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \ + retVal, \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \ + fixedWidthDesired); \ + return EASTL_ATOMIC_TYPE_PUN_CAST(T, retVal); + #define EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(op, bits) \ bool retVal; \ - EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), &expected, desired); \ + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \ + EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \ + retVal, \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), &expected), \ + fixedWidthDesired); \ return retVal; -#define EASTL_ATOMIC_BASE_OP_JOIN(fetchOp, Order) \ - EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order) + +#define EASTL_ATOMIC_BASE_OP_JOIN(op, Order) \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, op), Order) + #define EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(funcName, cmpxchgOp, bits) \ using Base::funcName; \ @@ -172,7 +203,7 @@ namespace internal { \ } \ \ - atomic_base_width() EA_NOEXCEPT = default; \ + atomic_base_width() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v) = default; \ \ atomic_base_width(const atomic_base_width&) EA_NOEXCEPT = delete; \ \ @@ -270,14 +301,14 @@ namespace internal \ using Base::operator=; \ \ - T operator =(T desired) EA_NOEXCEPT \ + T operator=(T desired) EA_NOEXCEPT \ { \ store(desired, eastl::memory_order_seq_cst); \ return desired; \ } \ \ - atomic_base_width& operator =(const atomic_base_width&) EA_NOEXCEPT = delete; \ - atomic_base_width& operator =(const atomic_base_width&) volatile EA_NOEXCEPT = delete; \ + atomic_base_width& operator=(const atomic_base_width&) EA_NOEXCEPT = delete; \ + atomic_base_width& operator=(const atomic_base_width&) volatile EA_NOEXCEPT = delete; \ \ }; diff --git a/include/EASTL/internal/atomic/atomic_casts.h 
b/include/EASTL/internal/atomic/atomic_casts.h index 303d4b05..84c1fac8 100644 --- a/include/EASTL/internal/atomic/atomic_casts.h +++ b/include/EASTL/internal/atomic/atomic_casts.h @@ -11,6 +11,9 @@ #endif +#include + + #include @@ -31,6 +34,7 @@ EASTL_FORCE_INLINE volatile T* AtomicVolatileCast(T* ptr) EA_NOEXCEPT return reinterpret_cast(ptr); } + /** * NOTE: * @@ -38,13 +42,17 @@ EASTL_FORCE_INLINE volatile T* AtomicVolatileCast(T* ptr) EA_NOEXCEPT * doing atomic operations on pointers must be casted to the suitable * sized unsigned integral type. * + * Some compiler intrinsics aren't generics and thus structs must also + * be casted to the appropriate sized unsigned integral type. + * * Atomic operations on an int* might have to be casted to a uint64_t on * a platform with 8-byte pointers as an example. * * Also doing an atomic operation on a struct, we must ensure that we observe * the whole struct as one atomic unit with no shearing between the members. * A load of a struct with two uint32_t members must be one uint64_t load, - * not two separate uint32_t loads. + * not two separate uint32_t loads, thus casted to the suitable sized + * unsigned integral type. */ template EASTL_FORCE_INLINE volatile Integral* AtomicVolatileIntegralCast(T* ptr) EA_NOEXCEPT @@ -109,16 +117,27 @@ EASTL_FORCE_INLINE ToType* AtomicTypeCast(FromType* ptr) EA_NOEXCEPT * This can be implemented in many different ways depending on the compiler such * as thru a union, memcpy, reinterpret_cast(atomicLoad), etc. */ -template +template , int> = 0> EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT { static_assert(sizeof(Pun) == sizeof(T), "eastl::atomic : Pun and T must be the same size for type punning!"); - Pun ret; + /** + * aligned_storage ensures we can TypePun objects that aren't trivially default constructible + * but still trivially copyable. 
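For readers who want the pun isolated from the macro machinery, a standalone sketch follows (TypePunSketch is a hypothetical name, not the EASTL signature), assuming only standard headers:

    #include <cstring>
    #include <type_traits>

    template <typename Pun, typename T>
    Pun TypePunSketch(const T& from)
    {
        static_assert(sizeof(Pun) == sizeof(T), "Pun and T must be the same size");

        // Raw aligned storage instead of "Pun ret;" because Pun may have a
        // user-provided (non-trivial) default constructor we must not invoke.
        typename std::aligned_storage<sizeof(Pun), alignof(Pun)>::type storage;
        std::memcpy(&storage, &from, sizeof(Pun));
        return reinterpret_cast<Pun&>(storage);
    }

The second overload below simply returns the value directly for the case where no pun is actually needed.
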
+ */ + typename eastl::aligned_storage::type ret; memcpy(eastl::addressof(ret), eastl::addressof(fromType), sizeof(Pun)); - return ret; + return reinterpret_cast(ret); } +template , int> = 0> +EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT +{ + return fromType; +} + + template EASTL_FORCE_INLINE T AtomicNegateOperand(T val) EA_NOEXCEPT { diff --git a/include/EASTL/internal/atomic/atomic_flag.h b/include/EASTL/internal/atomic/atomic_flag.h index f71b4def..52b2b1c0 100644 --- a/include/EASTL/internal/atomic/atomic_flag.h +++ b/include/EASTL/internal/atomic/atomic_flag.h @@ -36,8 +36,8 @@ class atomic_flag atomic_flag(const atomic_flag&) EA_NOEXCEPT = delete; - atomic_flag& operator =(const atomic_flag&) EA_NOEXCEPT = delete; - atomic_flag& operator =(const atomic_flag&) volatile EA_NOEXCEPT = delete; + atomic_flag& operator=(const atomic_flag&) EA_NOEXCEPT = delete; + atomic_flag& operator=(const atomic_flag&) volatile EA_NOEXCEPT = delete; public: /* clear */ diff --git a/include/EASTL/internal/atomic/atomic_integral.h b/include/EASTL/internal/atomic/atomic_integral.h index c1414446..060b5b87 100644 --- a/include/EASTL/internal/atomic/atomic_integral.h +++ b/include/EASTL/internal/atomic/atomic_integral.h @@ -80,10 +80,10 @@ namespace internal public: /* assignment operator */ - using Base::operator =; + using Base::operator=; - atomic_integral_base& operator =(const atomic_integral_base&) EA_NOEXCEPT = delete; - atomic_integral_base& operator =(const atomic_integral_base&) volatile EA_NOEXCEPT = delete; + atomic_integral_base& operator=(const atomic_integral_base&) EA_NOEXCEPT = delete; + atomic_integral_base& operator=(const atomic_integral_base&) volatile EA_NOEXCEPT = delete; public: /* fetch_add */ @@ -238,10 +238,10 @@ namespace internal \ public: /* assignment operator */ \ \ - using Base::operator =; \ + using Base::operator=; \ \ - atomic_integral_width& operator =(const atomic_integral_width&) EA_NOEXCEPT = delete; \ - atomic_integral_width& operator =(const atomic_integral_width&) volatile EA_NOEXCEPT = delete; \ + atomic_integral_width& operator=(const atomic_integral_width&) EA_NOEXCEPT = delete; \ + atomic_integral_width& operator=(const atomic_integral_width&) volatile EA_NOEXCEPT = delete; \ \ public: /* fetch_add */ \ \ diff --git a/include/EASTL/internal/atomic/atomic_macros.h b/include/EASTL/internal/atomic/atomic_macros.h index 8bdcc330..756a4b4d 100644 --- a/include/EASTL/internal/atomic/atomic_macros.h +++ b/include/EASTL/internal/atomic/atomic_macros.h @@ -43,13 +43,19 @@ // in various platform implementations. // // 2. Allows for the implementation to be implemented efficiently on compilers that do not -// directly implement the C++ memory model such as msvc. +// directly implement the C++ memory model in their intrinsics such as msvc. // // 3. Allows for the implementation of atomics that may not be supported on the given platform, // such as 128-bit atomics on 32-bit platforms since the macros will only ever be expanded // on platforms that support said features. This makes implementing said features pretty easy // since we do not have to worry about complicated feature detection in the low level implementations. // +// The macro implementation may asume that all passed in types are trivially constructible thus it is +// free to create local variables of the passed in types as it may please. +// It may also assume that all passed in types are trivially copyable as well. 
+// It cannot assume any passed in type is any given type thus is a specific type if needed, it must do an +// EASTL_ATOMIC_TYPE_PUN_CAST() to the required type. +// #include "compiler/compiler.h" diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h index d1defe9a..941ac51c 100644 --- a/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h @@ -84,4 +84,62 @@ #endif +///////////////////////////////////////////////////////////////////////////////// + + +#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_8) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_8 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_8 + +#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_8 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 + +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_16) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_16 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_16 + +#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_16 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 + +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_32) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_32 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_32 + +#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_32 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 + +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_64) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_64 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_64 + +#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_64 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 + +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_128) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_128 + +#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128) + + #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 + +#endif + + #endif /* EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h index b9055d1b..f03720d9 100644 --- a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h @@ -48,7 +48,7 @@ // EASTL_ATOMIC_CHOOSE_OP_IMPL // // This macro chooses between the compiler or architecture implementation for a -// given atomic operation +// given atomic operation. 
// // USAGE: // diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h index 9a7e818c..14f7be92 100644 --- a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h @@ -7,7 +7,7 @@ #define EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H #if defined(EA_PRAGMA_ONCE_SUPPORTED) -#pragma once + #pragma once #endif diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h index f3df54ec..c9ebd6e3 100644 --- a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h @@ -10,6 +10,7 @@ #pragma once #endif + ///////////////////////////////////////////////////////////////////////////////// // // void EASTL_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) diff --git a/include/EASTL/internal/atomic/atomic_pointer.h b/include/EASTL/internal/atomic/atomic_pointer.h index 93482791..38a4647a 100644 --- a/include/EASTL/internal/atomic/atomic_pointer.h +++ b/include/EASTL/internal/atomic/atomic_pointer.h @@ -77,12 +77,14 @@ namespace internal atomic_pointer_base() EA_NOEXCEPT = default; + atomic_pointer_base(const atomic_pointer_base&) EA_NOEXCEPT = delete; + public: /* assignment operators */ - using Base::operator =; + using Base::operator=; - atomic_pointer_base& operator =(const atomic_pointer_base&) EA_NOEXCEPT = delete; - atomic_pointer_base& operator =(const atomic_pointer_base&) volatile EA_NOEXCEPT = delete; + atomic_pointer_base& operator=(const atomic_pointer_base&) EA_NOEXCEPT = delete; + atomic_pointer_base& operator=(const atomic_pointer_base&) volatile EA_NOEXCEPT = delete; public: /* fetch_add */ @@ -208,12 +210,14 @@ namespace internal \ atomic_pointer_width() EA_NOEXCEPT = default; \ \ + atomic_pointer_width(const atomic_pointer_width&) EA_NOEXCEPT = delete; \ + \ public: /* assignment operators */ \ \ - using Base::operator =; \ + using Base::operator=; \ \ - atomic_pointer_width& operator =(const atomic_pointer_width&) EA_NOEXCEPT = delete; \ - atomic_pointer_width& operator =(const atomic_pointer_width&) volatile EA_NOEXCEPT = delete; \ + atomic_pointer_width& operator=(const atomic_pointer_width&) EA_NOEXCEPT = delete; \ + atomic_pointer_width& operator=(const atomic_pointer_width&) volatile EA_NOEXCEPT = delete; \ \ public: /* fetch_add */ \ \ diff --git a/include/EASTL/internal/atomic/atomic_size_aligned.h b/include/EASTL/internal/atomic/atomic_size_aligned.h index c76f9834..2043ae22 100644 --- a/include/EASTL/internal/atomic/atomic_size_aligned.h +++ b/include/EASTL/internal/atomic/atomic_size_aligned.h @@ -81,8 +81,8 @@ namespace internal EASTL_ATOMIC_ASSERT_ALIGNED(sizeof(T)); } - atomic_size_aligned() EA_NOEXCEPT - : mAtomic{} /* Zero-Initialized */ + atomic_size_aligned() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v) + : mAtomic{} /* Value-Initialize which will Zero-Initialize Trivial Constructible types */ { EASTL_ATOMIC_ASSERT_ALIGNED(sizeof(T)); } @@ -156,13 +156,13 @@ namespace internal public: /* assignment operator */ - T operator =(T desired) volatile EA_NOEXCEPT + T operator=(T desired) volatile EA_NOEXCEPT { EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); } - atomic_size_aligned& operator =(const atomic_size_aligned&) EA_NOEXCEPT = delete; - atomic_size_aligned& 
operator =(const atomic_size_aligned&) volatile EA_NOEXCEPT = delete; + atomic_size_aligned& operator=(const atomic_size_aligned&) EA_NOEXCEPT = delete; + atomic_size_aligned& operator=(const atomic_size_aligned&) volatile EA_NOEXCEPT = delete; protected: /* Accessors */ diff --git a/include/EASTL/internal/atomic/compiler/compiler.h b/include/EASTL/internal/atomic/compiler/compiler.h index 2fd220ca..65a4cd00 100644 --- a/include/EASTL/internal/atomic/compiler/compiler.h +++ b/include/EASTL/internal/atomic/compiler/compiler.h @@ -28,15 +28,18 @@ ///////////////////////////////////////////////////////////////////////////////// + namespace eastl { + namespace internal { /** * NOTE: + * * This can be used by specific compiler implementations to implement a data dependency compiler barrier. * Some compiler barriers do not take in input dependencies as is possible with the gcc asm syntax. * Thus we need a way to create a false dependency on the input variable so the compiler does not dead-store @@ -74,6 +77,7 @@ extern EASTL_API volatile CompilerBarrierDataDependencyFuncPtr gCompilerBarrierD } // namespace internal + } // namespace eastl diff --git a/include/EASTL/internal/atomic/compiler/compiler_load.h b/include/EASTL/internal/atomic/compiler/compiler_load.h index 0c76b6bc..734dbb80 100644 --- a/include/EASTL/internal/atomic/compiler/compiler_load.h +++ b/include/EASTL/internal/atomic/compiler/compiler_load.h @@ -118,11 +118,19 @@ * This implementation assumes the hardware doesn't reorder dependent * loads unlike the DEC Alpha. */ +#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr) \ + { \ + static_assert(eastl::is_pointer_v, "eastl::atomic : Read Depends Type must be a Pointer Type!"); \ + static_assert(eastl::is_pointer_v>, "eastl::atomic : Read Depends Ptr must be a Pointer to a Pointer!"); \ + \ + ret = (*EASTL_ATOMIC_VOLATILE_CAST(ptr)); \ + } + #define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \ - ret = (*EASTL_ATOMIC_VOLATILE_CAST(ptr)) \ + EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr) #define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \ - ret = (*EASTL_ATOMIC_VOLATILE_CAST(ptr)) \ + EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr) #define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1 #define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1 diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h index 4b74f9b3..26a99c20 100644 --- a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h @@ -13,6 +13,7 @@ /** * NOTE: + * * gcc __atomic builtins may defer to function calls in libatomic.so for architectures that do not * support atomic instructions of a given size. These functions will be implemented with pthread_mutex_t. * It also requires the explicit linking against the compiler runtime libatomic.so. @@ -20,7 +21,7 @@ * or on classic uniprocessor systems just disable interrupts. * * We do not want to have to link against libatomic.so or fall into the trap of our atomics degrading - * into locks. We would rather have user-code explicity use locking primitives if their code cannot + * into locks. We would rather have user-code explicitly use locking primitives if their code cannot * be satisfied with atomic instructions on the given platform. 
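As a side note on the "explicitly use locking primitives" guidance above, this is the kind of wrapper user code would write for a type that is too large for the platform's atomic instructions (a sketch with made-up names; EASTL does not ship such a wrapper):

    #include <mutex>

    struct BigBlob { char data[64]; }; // far wider than any lock-free atomic on current platforms

    class LockedBigBlob
    {
    public:
        BigBlob load() const
        {
            std::lock_guard<std::mutex> lock(mMutex);
            return mValue;
        }

        void store(const BigBlob& value)
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mValue = value;
        }

    private:
        mutable std::mutex mMutex;
        BigBlob mValue{};
    };

The lock is visible in the type at the call site, which is the point: the cost is stated up front rather than hidden behind an atomic-looking interface.
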
*/ static_assert(__atomic_always_lock_free(1, 0), "eastl::atomic where sizeof(T) == 1 must be lock-free!"); @@ -34,7 +35,7 @@ static_assert(__atomic_always_lock_free(4, 0), "eastl::atomic where sizeof(T) * NOTE: * * The following can fail on gcc/clang on 64-bit systems. - * Firstly, it depends on the -march setting on clang whether or not it calls out to libatomic. + * Firstly, it depends on the -march setting on clang whether or not it calls out to libatomic for 128-bit operations. * Second, gcc always calls out to libatomic for 128-bit atomics. It is unclear if it uses locks * or tries to look at the cpuid and use cmpxchg16b if its available. * gcc mailing lists argue that since load must be implemented with cmpxchg16b, then the __atomic bultin @@ -43,21 +44,25 @@ static_assert(__atomic_always_lock_free(4, 0), "eastl::atomic where sizeof(T) * We don't care about the read-only restriction because our eastl::atomic object is mutable * and also msvc doesn't enforce this restriction thus to be fully platform agnostic we cannot either. * - * Therefore, the follow static_assert is commented out for the time being. + * Therefore, the follow static_assert is commented out for the time being, as it always fails on these compilers. + * We still guarantee 128-bit atomics are lock-free by handrolling the inline assembly ourselves. * * static_assert(__atomic_always_lock_free(16, 0), "eastl::atomic where sizeof(T) == 16 must be lock-free!"); */ /** * NOTE: - * Why we do the cast to the unsigned fixed width types for every operation even though gcc/clang builtins are generics? + * + * Why do we do the cast to the unsigned fixed width types for every operation even though gcc/clang builtins are generics? * Well gcc/clang correctly-incorrectly call out to libatomic and do locking on user types that may be potentially misaligned. - * struct UserType { uint8_t a,b; }; This given struct is 16 bytes in size but has only 8 byte alignment. + * struct UserType { uint8_t a,b; }; This given struct is 2 bytes in size but has only 1 byte alignment. * gcc/clang cannot and doesn't know that we always guarantee every type T is size aligned within eastl::atomic. * Therefore it always emits calls into libatomic and does locking for structs like these which we do not want. - * Therefore you'll notice we always cast each atomic ptr type to the equivalent unsigned width type when doing the atomic operations. + * Therefore you'll notice we always cast each atomic ptr type to the equivalent unsigned fixed width type when doing the atomic operations. + * This ensures all user types are size aligned and thus are lock free. 
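A compact sketch of the cast pattern this note describes (UserType and LoadBits are illustrative; the real code path is the EASTL_GCC_ATOMIC_* macros below), valid for gcc/clang only:

    #include <cstdint>

    struct UserType { uint8_t a, b; }; // 2 bytes, but alignof(UserType) == 1 on its own

    uint16_t LoadBits(UserType* ptr)
    {
        // eastl::atomic<UserType> aligns its storage to sizeof(T), so this
        // uint16_t access is properly aligned and the builtin stays inline
        // and lock-free instead of routing through libatomic.
        return __atomic_load_n(reinterpret_cast<uint16_t*>(ptr), __ATOMIC_SEQ_CST);
    }
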
*/ + ///////////////////////////////////////////////////////////////////////////////// @@ -74,6 +79,16 @@ static_assert(__atomic_always_lock_free(4, 0), "eastl::atomic where sizeof(T) ///////////////////////////////////////////////////////////////////////////////// +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 uint8_t +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 uint16_t +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 uint32_t +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 uint64_t +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 __uint128_t + + +///////////////////////////////////////////////////////////////////////////////// + + #define EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, gccMemoryOrder) \ { \ integralType retIntegral; \ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h index 9920fe9f..64e8e541 100644 --- a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h @@ -28,4 +28,3 @@ #endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H */ - diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h index 3e9d533b..90b1ff5a 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h @@ -33,12 +33,41 @@ EA_RESTORE_ALL_VC_WARNINGS(); ///////////////////////////////////////////////////////////////////////////////// +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 char +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 short +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 long +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 __int64 + +namespace eastl +{ + +namespace internal +{ + +struct FixedWidth128 +{ + __int64 value[2]; +}; + +} // namespace internal + +} // namespace eastl + +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 eastl::internal::FixedWidth128 + + +///////////////////////////////////////////////////////////////////////////////// + + /** * NOTE: + * * Unfortunately MSVC Intrinsics depend on the architecture * that we are compiling for. * These are some indirection macros to make our lives easier and * ensure the least possible amount of copy-paste to reduce programmer errors. + * + * All compiler implementations end up deferring to the below macros. 
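A hedged aside on why the two-element FixedWidth128 struct above suffices on MSVC (Load128 is an illustrative sketch, not the code path EASTL uses): x64 exposes no plain 128-bit load intrinsic, so a load can be synthesized from the compare-exchange intrinsic, which always reports the current 16-byte value through its comparand argument.

    #include <intrin.h>

    eastl::internal::FixedWidth128 Load128(__int64 volatile* ptr) // ptr must be 16-byte aligned, as eastl::atomic guarantees
    {
        __int64 comparand[2] = { 0, 0 };

        // Always writes the original value of *ptr into comparand; if the
        // exchange does happen, it stores back the same bytes it just read.
        _InterlockedCompareExchange128(ptr, comparand[1], comparand[0], comparand);

        eastl::internal::FixedWidth128 result;
        result.value[0] = comparand[0];
        result.value[1] = comparand[1];
        return result;
    }
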
*/ #if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64) @@ -53,7 +82,7 @@ EA_RESTORE_ALL_VC_WARNINGS(); ret = Intrinsic(ptr, exchange, comparand) #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \ - ret = _InterlockedCompareExchange128(ptr, exchangeHigh, exchangeLow, comparandResult) + ret = _InterlockedCompareExchange128_np(ptr, exchangeHigh, exchangeLow, comparandResult) #elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64) diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h index f7f0c39b..12fc4b04 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h @@ -29,7 +29,7 @@ EASTL_MSVC_ATOMIC_ADD_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_ADD_FETCH_N(long long, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_ADD_FETCH_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h index 66f89ef2..70ec577f 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h @@ -11,6 +11,23 @@ #endif +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8 _InterlockedAnd8_np + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16 _InterlockedAnd16_np + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32 _InterlockedAnd_np + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64 _InterlockedAnd64_np + +#else + + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8 _InterlockedAnd8 + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16 _InterlockedAnd16 + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32 _InterlockedAnd + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64 _InterlockedAnd64 + +#endif + + #define EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE(ret, val, andend) \ ret = (val) & (andend) @@ -20,16 +37,16 @@ #define EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_AND_FETCH_N(char, _InterlockedAnd8, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_AND_FETCH_N(char, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_AND_FETCH_N(short, _InterlockedAnd16, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_AND_FETCH_N(short, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_AND_FETCH_N(long, _InterlockedAnd, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_AND_FETCH_N(long, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_AND_FETCH_N(long long, _InterlockedAnd64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_AND_FETCH_N(__int64, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder) 
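To make the post-intrinsic compute plumbing above concrete (AndFetchSketch is a hypothetical helper, not an EASTL symbol): MSVC only provides fetch-and style intrinsics that return the previous value, so the and_fetch result is reconstructed by reapplying the operand; the _np variants selected on x64 share the same signatures.

    #include <intrin.h>

    long AndFetchSketch(long volatile* ptr, long mask)
    {
        long previous = _InterlockedAnd(ptr, mask); // atomic AND, returns the value *before* the operation
        return previous & mask;                     // the value *after* the operation, i.e. the and_fetch result
    }
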
///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h index 427d3498..42117a1a 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h @@ -11,17 +11,34 @@ #endif +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8 + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16 _InterlockedCompareExchange16_np + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32 _InterlockedCompareExchange_np + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64 _InterlockedCompareExchange64_np + +#else + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8 + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16 _InterlockedCompareExchange16 + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32 _InterlockedCompareExchange + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64 _InterlockedCompareExchange64 + +#endif + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, MemoryOrder) \ - EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(char, _InterlockedCompareExchange8, type, ret, ptr, expected, desired, MemoryOrder) + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(char, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8, type, ret, ptr, expected, desired, MemoryOrder) #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, MemoryOrder) \ - EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(short, _InterlockedCompareExchange16, type, ret, ptr, expected, desired, MemoryOrder) + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(short, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16, type, ret, ptr, expected, desired, MemoryOrder) #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, MemoryOrder) \ - EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(long, _InterlockedCompareExchange, type, ret, ptr, expected, desired, MemoryOrder) + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(long, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32, type, ret, ptr, expected, desired, MemoryOrder) #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, MemoryOrder) \ - EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(long long, _InterlockedCompareExchange64, type, ret, ptr, expected, desired, MemoryOrder) + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(__int64, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64, type, ret, ptr, expected, desired, MemoryOrder) #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, MemoryOrder) \ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder) diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h index 93055e54..323f1fae 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h @@ -21,7 +21,7 @@ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(long, _InterlockedExchange, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(long long, _InterlockedExchange64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(__int64, _InterlockedExchange64, type, ret, ptr, val, MemoryOrder) #define 
EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \ { \ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h index 4cb05874..a951740e 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h @@ -26,7 +26,7 @@ EASTL_MSVC_ATOMIC_FETCH_ADD_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_ADD_N(long long, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_ADD_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h index c04f86df..96f78942 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h @@ -11,22 +11,39 @@ #endif +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8 _InterlockedAnd8_np + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16 _InterlockedAnd16_np + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32 _InterlockedAnd_np + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64 _InterlockedAnd64_np + +#else + + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8 _InterlockedAnd8 + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16 _InterlockedAnd16 + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32 _InterlockedAnd + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64 _InterlockedAnd64 + +#endif + + #define EASTL_MSVC_ATOMIC_FETCH_AND_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) #define EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_AND_N(char, _InterlockedAnd8, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_AND_N(char, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_AND_N(short, _InterlockedAnd16, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_AND_N(short, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_AND_N(long, _InterlockedAnd, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_AND_N(long, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_AND_N(long long, _InterlockedAnd64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_AND_N(__int64, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h index a592bdff..2792fc3d 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h +++ 
b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h @@ -11,22 +11,39 @@ #endif +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8 _InterlockedOr8_np + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16 _InterlockedOr16_np + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32 _InterlockedOr_np + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64 _InterlockedOr64_np + +#else + + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8 _InterlockedOr8 + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16 _InterlockedOr16 + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32 _InterlockedOr + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64 _InterlockedOr64 + +#endif + + #define EASTL_MSVC_ATOMIC_FETCH_OR_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) #define EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_OR_N(char, _InterlockedOr8, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_OR_N(char, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_OR_N(short, _InterlockedOr16, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_OR_N(short, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_OR_N(long, _InterlockedOr, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_OR_N(long, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_OR_N(long long, _InterlockedOr64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_OR_N(long long, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h index 25f41f97..6d5d9e3a 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h @@ -29,7 +29,7 @@ EASTL_MSVC_ATOMIC_FETCH_SUB_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_SUB_N(long long, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_SUB_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h index 7402e20d..371153e9 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h @@ -11,22 +11,39 @@ #endif +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8 _InterlockedXor8_np + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16 _InterlockedXor16_np + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32 _InterlockedXor_np + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64 
_InterlockedXor64_np + +#else + + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8 _InterlockedXor8 + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16 _InterlockedXor16 + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32 _InterlockedXor + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64 _InterlockedXor64 + +#endif + + #define EASTL_MSVC_ATOMIC_FETCH_XOR_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) #define EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_XOR_N(char, _InterlockedXor8, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_XOR_N(char, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_XOR_N(short, _InterlockedXor16, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_XOR_N(short, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_XOR_N(long, _InterlockedXor, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_XOR_N(long, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_FETCH_XOR_N(long long, _InterlockedXor64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_FETCH_XOR_N(__int64, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h index fe4218a7..c5b5fac3 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h @@ -11,6 +11,23 @@ #endif +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8 _InterlockedOr8_np + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16 _InterlockedOr16_np + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32 _InterlockedOr_np + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64 _InterlockedOr64_np + +#else + + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8 _InterlockedOr8 + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16 _InterlockedOr16 + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32 _InterlockedOr + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64 _InterlockedOr64 + +#endif + + #define EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE(ret, val, orend) \ ret = (val) | (orend) @@ -20,16 +37,16 @@ #define EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_OR_FETCH_N(char, _InterlockedOr8, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_OR_FETCH_N(char, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_OR_FETCH_N(short, _InterlockedOr16, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_OR_FETCH_N(short, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_OR_FETCH_N(long, _InterlockedOr, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_OR_FETCH_N(long, 
EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_OR_FETCH_N(long long, _InterlockedOr64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_OR_FETCH_N(__int64, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h index 97be65d1..6fb61e29 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h @@ -32,7 +32,7 @@ EASTL_MSVC_ATOMIC_SUB_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_SUB_FETCH_N(long long, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_SUB_FETCH_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h index 61409b81..44ffff90 100644 --- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h @@ -11,6 +11,23 @@ #endif +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8 _InterlockedXor8_np + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16 _InterlockedXor16_np + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32 _InterlockedXor_np + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64 _InterlockedXor64_np + +#else + + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8 _InterlockedXor8 + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16 _InterlockedXor16 + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32 _InterlockedXor + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64 _InterlockedXor64 + +#endif + + #define EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE(ret, val, xorend) \ ret = (val) ^ (xorend) @@ -20,16 +37,16 @@ #define EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_XOR_FETCH_N(char, _InterlockedXor8, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_XOR_FETCH_N(char, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_XOR_FETCH_N(short, _InterlockedXor16, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_XOR_FETCH_N(short, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_XOR_FETCH_N(long, _InterlockedXor, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_XOR_FETCH_N(long, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder) #define EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, MemoryOrder) \ - EASTL_MSVC_ATOMIC_XOR_FETCH_N(long long, _InterlockedXor64, type, ret, ptr, val, MemoryOrder) + EASTL_MSVC_ATOMIC_XOR_FETCH_N(__int64, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder) ///////////////////////////////////////////////////////////////////////////////// diff --git 
a/include/EASTL/internal/config.h b/include/EASTL/internal/config.h index c41326ee..e06e530f 100644 --- a/include/EASTL/internal/config.h +++ b/include/EASTL/internal/config.h @@ -147,7 +147,7 @@ #define EA_CPP14_CONSTEXPR constexpr #else #define EA_CPP14_CONSTEXPR // not supported - #define EA_NO_CPP14_CONSTEXPR + #define EA_NO_CPP14_CONSTEXPR #endif #endif @@ -245,11 +245,11 @@ namespace eastl /////////////////////////////////////////////////////////////////////////////// // EASTL_IF_NOT_DLL // -// Utility to include expressions only for static builds. +// Utility to include expressions only for static builds. // #ifndef EASTL_IF_NOT_DLL #if EASTL_DLL - #define EASTL_IF_NOT_DLL(x) + #define EASTL_IF_NOT_DLL(x) #else #define EASTL_IF_NOT_DLL(x) x #endif @@ -1850,7 +1850,7 @@ typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept /// EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR -/// This feature define allows users to toggle the problematic eastl::pair implicit +/// This feature define allows users to toggle the problematic eastl::pair implicit /// single element constructor. #ifndef EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR #define EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR 0 diff --git a/include/EASTL/internal/red_black_tree.h b/include/EASTL/internal/red_black_tree.h index 76fda09f..8e9eda07 100644 --- a/include/EASTL/internal/red_black_tree.h +++ b/include/EASTL/internal/red_black_tree.h @@ -836,7 +836,7 @@ namespace eastl { #endif for(; first != last; ++first) - insert(eastl::move(*first)); + insert(*first); #if EASTL_EXCEPTIONS_ENABLED } catch(...) diff --git a/test/source/TestArray.cpp b/test/source/TestArray.cpp index 02f1d852..3db95b95 100644 --- a/test/source/TestArray.cpp +++ b/test/source/TestArray.cpp @@ -18,12 +18,14 @@ using namespace eastl; template struct eastl::array; template struct eastl::array; // VC++ fails to compile due to error generated by the swap function. 
C2718: http://msdn.microsoft.com/en-us/library/vstudio/sxe76d9e.aspx +template class TP; + int TestArray() { int nErrorCount = 0; - { + { array a = { { 0, 1, 2, 3, 4 } }; array b = { { 0, 1, 2, 3 } }; array c = { { 4, 3, 2, 1, 0 } }; @@ -74,7 +76,7 @@ int TestArray() array::reverse_iterator itr = a.rbegin(); VERIFY((a.validate_iterator(itr.base()) & (isf_valid | isf_can_dereference)) != 0); VERIFY(*itr == 0); - + itr++; VERIFY(*itr == 1); @@ -193,6 +195,111 @@ int TestArray() #endif } + // to_array + { + { + constexpr int c_array[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + constexpr auto arr = to_array(c_array); + + static_assert(is_same_v, eastl::array>, "unexpected return type"); + + static_assert(arr[0] == 0, "unexpected array value"); + static_assert(arr[1] == 1, "unexpected array value"); + static_assert(arr[2] == 2, "unexpected array value"); + static_assert(arr[3] == 3, "unexpected array value"); + static_assert(arr[4] == 4, "unexpected array value"); + static_assert(arr[5] == 5, "unexpected array value"); + static_assert(arr[6] == 6, "unexpected array value"); + static_assert(arr[7] == 7, "unexpected array value"); + static_assert(arr[8] == 8, "unexpected array value"); + static_assert(arr[9] == 9, "unexpected array value"); + } + + { + constexpr auto arr = to_array({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + + static_assert(is_same_v, eastl::array>, "unexpected return type"); + + static_assert(arr[0] == 0, "unexpected array value"); + static_assert(arr[1] == 1, "unexpected array value"); + static_assert(arr[2] == 2, "unexpected array value"); + static_assert(arr[3] == 3, "unexpected array value"); + static_assert(arr[4] == 4, "unexpected array value"); + static_assert(arr[5] == 5, "unexpected array value"); + static_assert(arr[6] == 6, "unexpected array value"); + static_assert(arr[7] == 7, "unexpected array value"); + static_assert(arr[8] == 8, "unexpected array value"); + static_assert(arr[9] == 9, "unexpected array value"); + } + + { + constexpr auto arr = to_array({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + + static_assert(is_same_v, eastl::array>, "unexpected return type"); + + static_assert(arr[0] == 0l, "unexpected array value"); + static_assert(arr[1] == 1l, "unexpected array value"); + static_assert(arr[2] == 2l, "unexpected array value"); + static_assert(arr[3] == 3l, "unexpected array value"); + static_assert(arr[4] == 4l, "unexpected array value"); + static_assert(arr[5] == 5l, "unexpected array value"); + static_assert(arr[6] == 6l, "unexpected array value"); + static_assert(arr[7] == 7l, "unexpected array value"); + static_assert(arr[8] == 8l, "unexpected array value"); + static_assert(arr[9] == 9l, "unexpected array value"); + } + + { + constexpr auto arr = to_array({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + + static_assert(is_same_v, eastl::array>, "unexpected return type"); + + static_assert(arr[0] == 0ul, "unexpected array value"); + static_assert(arr[1] == 1ul, "unexpected array value"); + static_assert(arr[2] == 2ul, "unexpected array value"); + static_assert(arr[3] == 3ul, "unexpected array value"); + static_assert(arr[4] == 4ul, "unexpected array value"); + static_assert(arr[5] == 5ul, "unexpected array value"); + static_assert(arr[6] == 6ul, "unexpected array value"); + static_assert(arr[7] == 7ul, "unexpected array value"); + static_assert(arr[8] == 8ul, "unexpected array value"); + static_assert(arr[9] == 9ul, "unexpected array value"); + } + + { + constexpr auto arr = to_array("EASTL"); + + static_assert(is_same_v, eastl::array>, "unexpected return type"); + + static_assert(arr[0] == 
'E', "unexpected value in array"); + static_assert(arr[1] == 'A', "unexpected value in array"); + static_assert(arr[2] == 'S', "unexpected value in array"); + static_assert(arr[3] == 'T', "unexpected value in array"); + static_assert(arr[4] == 'L', "unexpected value in array"); + } + + // Older Microsoft compilers don't implement guaranteed copy ellision which is problematic when dealing with + // non-copyable types. We disable this test unless we are on a version of MSVC with those features. + #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // VS2019 16.0+ + { + struct LocalNonCopyable + { + LocalNonCopyable() = default; + ~LocalNonCopyable() = default; + + LocalNonCopyable(LocalNonCopyable&&) = default; + LocalNonCopyable& operator=(LocalNonCopyable&&) = default; + + LocalNonCopyable(const LocalNonCopyable&) = delete; + LocalNonCopyable& operator=(const LocalNonCopyable&) = delete; + }; + + constexpr auto arr = to_array({LocalNonCopyable{}}); + static_assert(arr.size() == 1, "unexpected error"); + } + #endif + } + return nErrorCount; } diff --git a/test/source/TestAtomicAsm.cpp b/test/source/TestAtomicAsm.cpp index c91d28b6..d4db04e6 100644 --- a/test/source/TestAtomicAsm.cpp +++ b/test/source/TestAtomicAsm.cpp @@ -4417,6 +4417,52 @@ EA_NO_INLINE static int TestAtomicReadDependsIntrusive() return a + b + c + d; } +#if defined(EASTL_ATOMIC_HAS_32BIT) + +EA_NO_INLINE static void TestAtomic32LoadStoreSameAddressSeqCst() +{ + eastl::atomic atomic{0}; + + uint32_t ret1 = atomic.load(eastl::memory_order_relaxed); + + atomic.store(4, eastl::memory_order_relaxed); + + uint32_t ret2 = atomic.load(eastl::memory_order_relaxed); + + uint32_t ret3 = atomic.load(eastl::memory_order_relaxed); + + atomic.store(5, eastl::memory_order_relaxed); + + eastl::compiler_barrier_data_dependency(ret1); + eastl::compiler_barrier_data_dependency(ret2); + eastl::compiler_barrier_data_dependency(ret3); +} + +#endif + +#if defined(EASTL_ATOMIC_HAS_128BIT) + +EA_NO_INLINE static void TestAtomic128LoadStoreSameAddressSeqCst() +{ + eastl::atomic atomic{UserType128{0, 0, 0, 0}}; + + UserType128 ret1 = atomic.load(eastl::memory_order_relaxed); + + atomic.store(UserType128{1, 0, 2, 4}, eastl::memory_order_relaxed); + + UserType128 ret2 = atomic.load(eastl::memory_order_relaxed); + + UserType128 ret3 = atomic.load(eastl::memory_order_relaxed); + + atomic.store(UserType128{1, 1, 2, 4}, eastl::memory_order_relaxed); + + eastl::compiler_barrier_data_dependency(ret1); + eastl::compiler_barrier_data_dependency(ret2); + eastl::compiler_barrier_data_dependency(ret3); +} + +#endif + int TestAtomicAsm() { int nErrorCount = 0; @@ -4859,5 +4905,17 @@ int TestAtomicAsm() TestCompilerBarrierDataDependency(); } +#if defined(EASTL_ATOMIC_HAS_32BIT) + + TestAtomic32LoadStoreSameAddressSeqCst(); + +#endif + +#if defined(EASTL_ATOMIC_HAS_128BIT) + + TestAtomic128LoadStoreSameAddressSeqCst(); + +#endif + return nErrorCount; } diff --git a/test/source/TestAtomicBasic.cpp b/test/source/TestAtomicBasic.cpp index c37b53cc..d082e35f 100644 --- a/test/source/TestAtomicBasic.cpp +++ b/test/source/TestAtomicBasic.cpp @@ -1,3826 +1,3985 @@ -///////////////////////////////////////////////////////////////////////////// -// Copyright (c) Electronic Arts Inc. All rights reserved. -///////////////////////////////////////////////////////////////////////////// - - -#include "EASTLTest.h" - -#include - - -/** - * This is a basic test suite that tests all functionality is implemented - * and that all operations do as expected. - * I.E. 
fetch_add returns the previous value and add_fetch returns the current value - */ - -class AtomicStandaloneBasicTest -{ -public: - - int RunTest() - { - AtomicSignalFence(); - - AtomicThreadFence(); - - AtomicCpuPause(); - - AtomicCompilerBarrier(); - - return nErrorCount; - } - -private: - - void AtomicSignalFence(); - - void AtomicThreadFence(); - - void AtomicCpuPause(); - - void AtomicCompilerBarrier(); - -private: - - int nErrorCount = 0; -}; - -void AtomicStandaloneBasicTest::AtomicSignalFence() -{ - eastl::atomic_signal_fence(eastl::memory_order_relaxed); - - eastl::atomic_signal_fence(eastl::memory_order_acquire); - - eastl::atomic_signal_fence(eastl::memory_order_release); - - eastl::atomic_signal_fence(eastl::memory_order_acq_rel); - - eastl::atomic_signal_fence(eastl::memory_order_seq_cst); -} - -void AtomicStandaloneBasicTest::AtomicThreadFence() -{ - eastl::atomic_thread_fence(eastl::memory_order_relaxed); - - eastl::atomic_thread_fence(eastl::memory_order_acquire); - - eastl::atomic_thread_fence(eastl::memory_order_release); - - eastl::atomic_thread_fence(eastl::memory_order_acq_rel); - - eastl::atomic_thread_fence(eastl::memory_order_seq_cst); -} - -void AtomicStandaloneBasicTest::AtomicCpuPause() -{ - eastl::cpu_pause(); -} - -void AtomicStandaloneBasicTest::AtomicCompilerBarrier() -{ - eastl::compiler_barrier(); - - { - bool ret = false; - eastl::compiler_barrier_data_dependency(ret); - } -} - -class AtomicFlagBasicTest -{ -public: - - using AtomicType = eastl::atomic_flag; - using BoolType = bool; - - int RunTest() - { - TestAtomicFlagCtor(); - - TestAtomicFlagClear(); - - TestAtomicFlagTestAndSet(); - - TestAtomicFlagTest(); - - TestAllMemoryOrders(); - - TestAtomicFlagStandalone(); - - return nErrorCount; - } - -private: - - void TestAtomicFlagCtor(); - - void TestAtomicFlagClear(); - - void TestAtomicFlagTestAndSet(); - - void TestAtomicFlagTest(); - - void TestAllMemoryOrders(); - - void TestAtomicFlagStandalone(); - -private: - - int nErrorCount = 0; -}; - -void AtomicFlagBasicTest::TestAtomicFlagCtor() -{ - { - AtomicType atomic; - - VERIFY(atomic.test(eastl::memory_order_relaxed) == false); - } - - { - AtomicType atomic{ false }; - - VERIFY(atomic.test(eastl::memory_order_relaxed) == false); - } - - { - AtomicType atomic{ true }; - - VERIFY(atomic.test(eastl::memory_order_relaxed) == true); - } -} - -void AtomicFlagBasicTest::TestAtomicFlagClear() -{ - { - AtomicType atomic; - - atomic.clear(eastl::memory_order_relaxed); - - VERIFY(atomic.test(eastl::memory_order_relaxed) == false); - } - - { - AtomicType atomic{ true }; - - atomic.clear(eastl::memory_order_relaxed); - - VERIFY(atomic.test(eastl::memory_order_relaxed) == false); - } -} - -void AtomicFlagBasicTest::TestAtomicFlagTestAndSet() -{ - { - AtomicType atomic; - - BoolType ret = atomic.test_and_set(eastl::memory_order_relaxed); - - VERIFY(ret == false); - - VERIFY(atomic.test(eastl::memory_order_relaxed) == true); - } - - { - AtomicType atomic{ true }; - - BoolType ret = atomic.test_and_set(eastl::memory_order_relaxed); - - VERIFY(ret == true); - - VERIFY(atomic.test(eastl::memory_order_relaxed) == true); - } -} - -void AtomicFlagBasicTest::TestAtomicFlagTest() -{ - { - AtomicType atomic; - - VERIFY(atomic.test(eastl::memory_order_relaxed) == false); - } - - { - AtomicType atomic{ true }; - - VERIFY(atomic.test(eastl::memory_order_relaxed) == true); - } -} - -void AtomicFlagBasicTest::TestAllMemoryOrders() -{ - { - AtomicType atomic; - - atomic.clear(); - - atomic.clear(eastl::memory_order_relaxed); - - 
atomic.clear(eastl::memory_order_release); - - atomic.clear(eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - atomic.test_and_set(); - - atomic.test_and_set(eastl::memory_order_relaxed); - - atomic.test_and_set(eastl::memory_order_acquire); - - atomic.test_and_set(eastl::memory_order_release); - - atomic.test_and_set(eastl::memory_order_acq_rel); - - atomic.test_and_set(eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - BoolType ret = atomic.test(); - - ret = atomic.test(eastl::memory_order_relaxed); - - ret = atomic.test(eastl::memory_order_acquire); - - ret = atomic.test(eastl::memory_order_seq_cst); - } -} - -void AtomicFlagBasicTest::TestAtomicFlagStandalone() -{ - { - AtomicType atomic; - - BoolType ret = atomic_flag_test_and_set(&atomic); - - ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_relaxed); - - ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_acquire); - - ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_release); - - ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_acq_rel); - - ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - atomic_flag_clear(&atomic); - - atomic_flag_clear_explicit(&atomic, eastl::memory_order_relaxed); - - atomic_flag_clear_explicit(&atomic, eastl::memory_order_release); - - atomic_flag_clear_explicit(&atomic, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - BoolType ret = atomic_flag_test(&atomic); - - ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_relaxed); - - ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_acquire); - - ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_seq_cst); - } -} - -class AtomicVoidPointerBasicTest -{ -public: - - using AtomicType = eastl::atomic; - using PtrType = void*; - - int RunTest() - { - TestAtomicCtor(); - - TestAssignmentOperators(); - - TestIsLockFree(); - - TestStore(); - - TestLoad(); - - TestExchange(); - - TestCompareExchangeWeak(); - - TestCompareExchangeStrong(); - - TestAllMemoryOrders(); - - return nErrorCount; - } - -private: - - void TestAtomicCtor(); - - void TestAssignmentOperators(); - - void TestIsLockFree(); - - void TestStore(); - - void TestLoad(); - - void TestExchange(); - - void TestCompareExchangeWeak(); - - void TestCompareExchangeStrong(); - - void TestAllMemoryOrders(); - -private: - - int nErrorCount = 0; -}; - -void AtomicVoidPointerBasicTest::TestAtomicCtor() -{ - { - AtomicType atomic; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } - - { - AtomicType atomic{ (PtrType)0x04 }; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x04); - } -} - -void AtomicVoidPointerBasicTest::TestAssignmentOperators() -{ - { - AtomicType atomic; - - PtrType ret = atomic = (PtrType)0x04; - - VERIFY(ret == (PtrType)0x04); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x04); - } - - { - AtomicType atomic; - - PtrType ret = atomic = (PtrType)0x0; - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } -} - -void AtomicVoidPointerBasicTest::TestIsLockFree() -{ - { - AtomicType atomic; - - VERIFY(atomic.is_lock_free() == true); - - VERIFY(atomic.is_always_lock_free == true); - } -} - -void AtomicVoidPointerBasicTest::TestStore() -{ - { - PtrType val = (PtrType)0x0; - AtomicType atomic; - - atomic.store(val, eastl::memory_order_relaxed); - - 
VERIFY(atomic.load(eastl::memory_order_relaxed) == val); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic; - - atomic.store(val, eastl::memory_order_relaxed); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == val); - } -} - -void AtomicVoidPointerBasicTest::TestLoad() -{ - { - AtomicType atomic{ (PtrType)0x4 }; - - PtrType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic == (PtrType)0x4); - } -} - -void AtomicVoidPointerBasicTest::TestExchange() -{ - { - AtomicType atomic; - - PtrType ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } -} - -void AtomicVoidPointerBasicTest::TestCompareExchangeWeak() -{ - { - AtomicType atomic; - - PtrType observed = (PtrType)0x0; - bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - if (ret) - { - VERIFY(ret == true); - VERIFY(observed == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - } - - { - AtomicType atomic; - - PtrType observed = (PtrType)0x4; - bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - VERIFY(ret == false); - VERIFY(observed == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } -} - -void AtomicVoidPointerBasicTest::TestCompareExchangeStrong() -{ - { - AtomicType atomic; - - PtrType observed = (PtrType)0x0; - bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - VERIFY(ret == true); - VERIFY(observed == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType observed = (PtrType)0x4; - bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - VERIFY(ret == false); - VERIFY(observed == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } -} - -void AtomicVoidPointerBasicTest::TestAllMemoryOrders() -{ - { - AtomicType atomic; - PtrType val = (PtrType)0x4; - - atomic.store(val); - - atomic.store(val, eastl::memory_order_relaxed); - - atomic.store(val, eastl::memory_order_release); - - atomic.store(val, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - PtrType ret = atomic.load(); - - ret = atomic.load(eastl::memory_order_relaxed); - - ret = atomic.load(eastl::memory_order_acquire); - - ret = atomic.load(eastl::memory_order_seq_cst); - - ret = atomic.load(eastl::memory_order_read_depends); - } - - { - AtomicType atomic; - - PtrType ret = atomic.exchange((PtrType)0x4); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_relaxed); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acquire); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acq_rel); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - PtrType observed = (PtrType)0x0; - - bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, 
eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - PtrType observed = (PtrType)0x0; - - bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - PtrType observed = (PtrType)0x0; - bool ret; - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - PtrType observed = (PtrType)0x0; - bool ret; - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); - } -} - -class AtomicPointerBasicTest -{ -public: - - using AtomicType = eastl::atomic; - using PtrType = uint32_t*; - - int RunTest() - { - TestAtomicCtor(); - - TestAssignmentOperators(); - - TestIsLockFree(); - - TestStore(); - - TestLoad(); - - TestExchange(); - - TestCompareExchangeWeak(); - - TestCompareExchangeStrong(); - - TestAllMemoryOrders(); - - TestFetchAdd(); - TestAddFetch(); - - TestFetchSub(); - TestSubFetch(); - - 
TestAtomicPointerStandalone(); - - return nErrorCount; - } - -private: - - void TestAtomicCtor(); - - void TestAssignmentOperators(); - - void TestIsLockFree(); - - void TestStore(); - - void TestLoad(); - - void TestExchange(); - - void TestCompareExchangeWeak(); - - void TestCompareExchangeStrong(); - - void TestAllMemoryOrders(); - - void TestFetchAdd(); - void TestAddFetch(); - - void TestFetchSub(); - void TestSubFetch(); - - void TestAtomicPointerStandalone(); - -private: - - int nErrorCount = 0; -}; - -void AtomicPointerBasicTest::TestAtomicCtor() -{ - { - AtomicType atomic{}; - - PtrType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == nullptr); - } - - { - AtomicType atomic{ (PtrType)0x4 }; - - PtrType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - } -} - -void AtomicPointerBasicTest::TestAssignmentOperators() -{ - { - PtrType val = (PtrType)0x4; - AtomicType atomic{val}; - - PtrType expected = (PtrType)0x8; - - PtrType ret = atomic = expected; - - VERIFY(ret == expected); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - PtrType val = (PtrType)0x0; - AtomicType atomic{val}; - - PtrType ret = atomic = val; - - VERIFY(ret == val); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == val); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{val}; - - PtrType expected = (PtrType)0x8; - PtrType ret = ++atomic; - - VERIFY(ret == expected); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{val}; - - PtrType expected = (PtrType)0x8; - PtrType ret = atomic++; - - VERIFY(ret == val); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{val}; - - PtrType expected = (PtrType)0x10; - PtrType ret = atomic += 3; - - VERIFY(ret == expected); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{val}; - - PtrType expected = (PtrType)0x4; - PtrType ret = atomic += 0; - - VERIFY(ret == expected); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{val}; - - PtrType expected = (PtrType)0x0; - PtrType ret = atomic -= 1; - - VERIFY(ret == expected); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{val}; - - PtrType expected = (PtrType)0x4; - PtrType ret = atomic -= 0; - - VERIFY(ret == expected); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } -} - -void AtomicPointerBasicTest::TestIsLockFree() -{ - { - AtomicType atomic; - - VERIFY(atomic.is_lock_free() == true); - - VERIFY(atomic.is_always_lock_free == true); - } -} - -void AtomicPointerBasicTest::TestStore() -{ - { - PtrType val = (PtrType)0x0; - AtomicType atomic; - - atomic.store(val, eastl::memory_order_relaxed); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == val); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic; - - atomic.store(val, eastl::memory_order_relaxed); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == val); - } -} - -void AtomicPointerBasicTest::TestLoad() -{ - { - AtomicType atomic{ (PtrType)0x4 }; - - PtrType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic == (PtrType)0x4); - } -} - -void AtomicPointerBasicTest::TestCompareExchangeWeak() -{ - { - AtomicType atomic; - - PtrType 
observed = (PtrType)0x0; - bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - if (ret) - { - VERIFY(ret == true); - VERIFY(observed == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - } - - { - AtomicType atomic; - - PtrType observed = (PtrType)0x4; - bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - VERIFY(ret == false); - VERIFY(observed == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } -} - -void AtomicPointerBasicTest::TestCompareExchangeStrong() -{ - { - AtomicType atomic; - - PtrType observed = (PtrType)0x0; - bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - VERIFY(ret == true); - VERIFY(observed == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType observed = (PtrType)0x4; - bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - VERIFY(ret == false); - VERIFY(observed == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } -} - -void AtomicPointerBasicTest::TestExchange() -{ - { - AtomicType atomic; - - PtrType ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } -} - -void AtomicPointerBasicTest::TestAllMemoryOrders() -{ - { - AtomicType atomic; - PtrType val = (PtrType)0x4; - - atomic.store(val); - - atomic.store(val, eastl::memory_order_relaxed); - - atomic.store(val, eastl::memory_order_release); - - atomic.store(val, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - PtrType ret = atomic.load(); - - ret = atomic.load(eastl::memory_order_relaxed); - - ret = atomic.load(eastl::memory_order_acquire); - - ret = atomic.load(eastl::memory_order_seq_cst); - - ret = atomic.load(eastl::memory_order_read_depends); - } - - { - AtomicType atomic; - - PtrType ret = atomic.fetch_add(0); - - ret = atomic.fetch_add(0, eastl::memory_order_relaxed); - - ret = atomic.fetch_add(0, eastl::memory_order_acquire); - - ret = atomic.fetch_add(0, eastl::memory_order_release); - - ret = atomic.fetch_add(0, eastl::memory_order_acq_rel); - - ret = atomic.fetch_add(0, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - PtrType ret = atomic.fetch_sub(0); - - ret = atomic.fetch_sub(0, eastl::memory_order_relaxed); - - ret = atomic.fetch_sub(0, eastl::memory_order_acquire); - - ret = atomic.fetch_sub(0, eastl::memory_order_release); - - ret = atomic.fetch_sub(0, eastl::memory_order_acq_rel); - - ret = atomic.fetch_sub(0, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - PtrType ret = atomic.add_fetch(0); - - ret = atomic.add_fetch(0, eastl::memory_order_relaxed); - - ret = atomic.add_fetch(0, eastl::memory_order_acquire); - - ret = atomic.add_fetch(0, eastl::memory_order_release); - - ret = atomic.add_fetch(0, eastl::memory_order_acq_rel); - - ret = atomic.add_fetch(0, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - PtrType ret = atomic.sub_fetch(0); - - ret = atomic.sub_fetch(0, eastl::memory_order_relaxed); - - ret = atomic.sub_fetch(0, eastl::memory_order_acquire); - - ret = atomic.sub_fetch(0, eastl::memory_order_release); - - ret = atomic.sub_fetch(0, eastl::memory_order_acq_rel); - - ret = atomic.sub_fetch(0, eastl::memory_order_seq_cst); - } - - { - 
AtomicType atomic; - - PtrType ret = atomic.exchange((PtrType)0x4); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_relaxed); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acquire); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acq_rel); - - ret = atomic.exchange((PtrType)0x4, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - PtrType observed = (PtrType)0x0; - - bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - PtrType observed = (PtrType)0x0; - - bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - PtrType observed = (PtrType)0x0; - bool ret; - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - PtrType observed = (PtrType)0x0; - bool ret; - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, 
(PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); - } -} - -void AtomicPointerBasicTest::TestFetchAdd() -{ - { - PtrType val = (PtrType)0x4; - AtomicType atomic{ val }; - - PtrType ret = atomic.fetch_add(1, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x8); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{ val }; - - PtrType ret = atomic.fetch_add(0, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } -} - -void AtomicPointerBasicTest::TestAddFetch() -{ - { - PtrType val = (PtrType)0x4; - AtomicType atomic{ val }; - - PtrType ret = atomic.add_fetch(1, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x8); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x8); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{ val }; - - PtrType ret = atomic.add_fetch(0, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } -} - -void AtomicPointerBasicTest::TestFetchSub() -{ - { - PtrType val = (PtrType)0x4; - AtomicType atomic{ val }; - - PtrType ret = atomic.fetch_sub(1, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{ val }; - - PtrType ret = atomic.fetch_sub(0, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } -} - -void AtomicPointerBasicTest::TestSubFetch() -{ - { - PtrType val = (PtrType)0x4; - AtomicType atomic{ val }; - - PtrType ret = atomic.sub_fetch(1, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } - - { - PtrType val = (PtrType)0x4; - AtomicType atomic{ val }; - - PtrType ret = atomic.sub_fetch(0, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } -} - -void AtomicPointerBasicTest::TestAtomicPointerStandalone() -{ - { - AtomicType atomic; - - VERIFY(atomic_is_lock_free(&atomic) == true); - } - - { - AtomicType atomic; - PtrType val = (PtrType)0x4; - - atomic_store(&atomic, val); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == val); - } - - { - AtomicType atomic; - PtrType val = (PtrType)0x4; - - atomic_store_explicit(&atomic, val, eastl::memory_order_relaxed); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == val); - } - - { - AtomicType atomic; - - PtrType ret = atomic_load(&atomic); - - VERIFY(ret == (PtrType)0x0); - } - - { - AtomicType atomic; - - PtrType ret = atomic_load_explicit(&atomic, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x0); - } - - { - AtomicType atomic; - - PtrType ret = atomic_load_cond(&atomic, [](PtrType val) { return true; }); - - VERIFY(ret == (PtrType)0x0); - } - - { - AtomicType atomic; - - PtrType ret = atomic_load_cond_explicit(&atomic, [](PtrType val) { return 
true; }, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x0); - } - - { - AtomicType atomic; - - PtrType ret = atomic_exchange(&atomic, (PtrType)0x4); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType ret = atomic_exchange_explicit(&atomic, (PtrType)0x4, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType ret = atomic_add_fetch(&atomic, 1); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType ret = atomic_add_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType ret = atomic_fetch_add(&atomic, 1); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType ret = atomic_fetch_add_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic{ (PtrType)0x4 }; - - PtrType ret = atomic_fetch_sub(&atomic, 1); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } - - { - AtomicType atomic{ (PtrType)0x4 }; - - PtrType ret = atomic_fetch_sub_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x4); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } - - { - AtomicType atomic{ (PtrType)0x4 }; - - PtrType ret = atomic_sub_fetch(&atomic, 1); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } - - { - AtomicType atomic{ (PtrType)0x4 }; - - PtrType ret = atomic_sub_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == (PtrType)0x0); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); - } - - { - AtomicType atomic; - - PtrType expected = (PtrType)0x0; - bool ret = atomic_compare_exchange_strong(&atomic, &expected, (PtrType)0x4); - - VERIFY(ret == true); - - VERIFY(expected == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType expected = (PtrType)0x0; - bool ret = atomic_compare_exchange_strong_explicit(&atomic, &expected, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - VERIFY(ret == true); - - VERIFY(expected == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - - { - AtomicType atomic; - - PtrType expected = (PtrType)0x0; - bool ret = atomic_compare_exchange_weak(&atomic, &expected, (PtrType)0x4); - - if (ret) - { - VERIFY(ret == true); - - VERIFY(expected == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - } - - { - AtomicType atomic; - - PtrType expected = (PtrType)0x0; - bool ret = atomic_compare_exchange_weak_explicit(&atomic, &expected, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - if (ret) - { - VERIFY(ret == true); - - VERIFY(expected == (PtrType)0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); - } - } -} - -struct AtomicUserType16 -{ - uint8_t a; - uint8_t b; - - friend bool operator==(const 
AtomicUserType16& a, const AtomicUserType16& b) - { - return (a.a == b.a) && (a.b == b.b); - } -}; - -struct AtomicUserType128 -{ - uint32_t a; - uint32_t b; - uint32_t c; - uint32_t d; - - AtomicUserType128() = default; - - AtomicUserType128(const AtomicUserType128&) = default; - - AtomicUserType128(uint32_t a, uint32_t b) - : a(a) - , b(b) - , c(0) - , d(0) - { - } - - AtomicUserType128& operator=(const AtomicUserType128&) = default; - - friend bool operator==(const AtomicUserType128& a, const AtomicUserType128& b) - { - return (a.a == b.a) && (a.b == b.b) && (a.c == b.c) && (a.d == b.d); - } -}; - -template -class AtomicUserTypeBasicTest -{ -public: - - using AtomicType = eastl::atomic; - using UserType = T; - - int RunTest() - { - TestAtomicCtor(); - - TestAssignmentOperators(); - - TestIsLockFree(); - - TestStore(); - - TestLoad(); - - TestExchange(); - - TestCompareExchangeWeak(); - - TestCompareExchangeStrong(); - - TestAllMemoryOrders(); - - return nErrorCount; - } - -private: - - void TestAtomicCtor(); - - void TestAssignmentOperators(); - - void TestIsLockFree(); - - void TestStore(); - - void TestLoad(); - - void TestExchange(); - - void TestCompareExchangeWeak(); - - void TestCompareExchangeStrong(); - - void TestAllMemoryOrders(); - -private: - - int nErrorCount = 0; -}; - -template -void AtomicUserTypeBasicTest::TestAtomicCtor() -{ - { - AtomicType atomic; - UserType expected{0, 0}; - - UserType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == expected); - } - - { - AtomicType atomic{ {5, 8} }; - UserType expected{5, 8}; - - UserType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == expected); - } -} - -template -void AtomicUserTypeBasicTest::TestAssignmentOperators() -{ - { - AtomicType atomic; - UserType expected{5, 6}; - - atomic = {5, 6}; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - AtomicType atomic; - UserType expected{0, 0}; - - atomic = {0, 0}; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } -} - -template -void AtomicUserTypeBasicTest::TestIsLockFree() -{ - { - AtomicType atomic; - - VERIFY(atomic.is_lock_free() == true); - - VERIFY(AtomicType::is_always_lock_free == true); - } -} - -template -void AtomicUserTypeBasicTest::TestStore() -{ - { - AtomicType atomic; - UserType expected{5, 6}; - - atomic.store(expected, eastl::memory_order_relaxed); - - UserType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == expected); - } - - { - AtomicType atomic; - UserType expected{5, 6}; - - atomic.store({5, 6}, eastl::memory_order_relaxed); - - UserType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == expected); - } -} - -template -void AtomicUserTypeBasicTest::TestLoad() -{ - { - AtomicType atomic; - UserType expected{0, 0}; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - - VERIFY(atomic == expected); - } - - { - AtomicType atomic{ {5, 6} }; - UserType expected{5, 6}; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - - VERIFY(atomic == expected); - } -} - -template -void AtomicUserTypeBasicTest::TestExchange() -{ - { - AtomicType atomic; - UserType expected{0, 0}; - - UserType ret = atomic.exchange({0, 0}, eastl::memory_order_relaxed); - - VERIFY(ret == expected); - } - - { - AtomicType atomic; - UserType expected{0, 0}; - UserType expected2{0, 1}; - - UserType ret = atomic.exchange({0, 1}, eastl::memory_order_relaxed); - - VERIFY(ret == expected); - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected2); - } -} - 
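For reference, the compare_exchange_weak cases in these tests only VERIFY their postconditions when the call reports success, because a weak compare-exchange is allowed to fail spuriously even when the stored value equals the expected one. A minimal retry-loop sketch of that idiom, assuming only eastl::atomic<int> from <EASTL/atomic.h> (the same API the integral tests below exercise):

    #include <EASTL/atomic.h>

    void IncrementWithWeakCas(eastl::atomic<int>& counter)
    {
        int expected = counter.load(eastl::memory_order_relaxed);
        // compare_exchange_weak may fail spuriously, so loop until it succeeds;
        // on failure 'expected' is refreshed with the value currently stored.
        while (!counter.compare_exchange_weak(expected, expected + 1,
                                              eastl::memory_order_relaxed,
                                              eastl::memory_order_relaxed))
        {
        }
    }
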
-template -void AtomicUserTypeBasicTest::TestCompareExchangeWeak() -{ - { - AtomicType atomic; - - UserType observed{0, 0}; - bool ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed); - - UserType expected{0, 0}; - if (ret) - { - VERIFY(ret == true); - VERIFY(observed == expected); - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - } - - { - AtomicType atomic; - - UserType observed{0, 0}; - bool ret = atomic.compare_exchange_weak(observed, {0, 1}, eastl::memory_order_relaxed); - - UserType expected{0, 1}; - UserType expected2{0, 0}; - if (ret) - { - VERIFY(ret == true); - VERIFY(observed == expected2); - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - } - - { - AtomicType atomic; - - UserType observed{0, 1}; - bool ret = atomic.compare_exchange_weak(observed, {0, 1}, eastl::memory_order_relaxed); - - UserType expected{0, 0}; - - VERIFY(ret == false); - VERIFY(observed == expected); - } -} - -template -void AtomicUserTypeBasicTest::TestCompareExchangeStrong() -{ - { - AtomicType atomic; - - UserType observed{0, 0}; - bool ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed); - - UserType expected{0, 0}; - - VERIFY(ret == true); - VERIFY(observed == expected); - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - AtomicType atomic; - - UserType observed{0, 0}; - bool ret = atomic.compare_exchange_strong(observed, {0, 1}, eastl::memory_order_relaxed); - - UserType expected{0, 1}; - UserType expected2{0, 0}; - - VERIFY(ret == true); - VERIFY(observed == expected2); - VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); - } - - { - AtomicType atomic; - - UserType observed{0, 1}; - bool ret = atomic.compare_exchange_strong(observed, {0, 1}, eastl::memory_order_relaxed); - - UserType expected{0, 0}; - - VERIFY(ret == false); - VERIFY(observed == expected); - } -} - -template -void AtomicUserTypeBasicTest::TestAllMemoryOrders() -{ - { - AtomicType atomic; - UserType val{0, 1}; - - atomic.store(val); - - atomic.store(val, eastl::memory_order_relaxed); - - atomic.store(val, eastl::memory_order_release); - - atomic.store(val, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - UserType ret = atomic.load(); - - ret = atomic.load(eastl::memory_order_relaxed); - - ret = atomic.load(eastl::memory_order_acquire); - - ret = atomic.load(eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - UserType ret = atomic.exchange({0, 1}); - - ret = atomic.exchange({0, 0}, eastl::memory_order_relaxed); - - ret = atomic.exchange({0, 0}, eastl::memory_order_acquire); - - ret = atomic.exchange({0, 0}, eastl::memory_order_release); - - ret = atomic.exchange({0, 0}, eastl::memory_order_acq_rel); - - ret = atomic.exchange({0, 0}, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - UserType observed{0, 0}; - - bool ret = atomic.compare_exchange_weak(observed, {0, 0}); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_release); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - UserType observed{0, 0}; - - bool ret = atomic.compare_exchange_strong(observed, {0, 0}); - - ret = 
atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_release); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - UserType observed{0, 0}; - bool ret; - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_release, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - UserType observed{0, 0}; - bool ret; - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_release, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); - } -} - - -class AtomicBoolBasicTest -{ -public: - - using AtomicType = eastl::atomic; - using BoolType = bool; - - int RunTest() - { - TestAtomicCtor(); - - TestAssignmentOperators(); - - TestIsLockFree(); - - TestStore(); - - TestLoad(); - - TestExchange(); - - TestCompareExchangeWeak(); - - TestCompareExchangeStrong(); - - TestAllMemoryOrders(); - - return nErrorCount; - } - -private: - - void TestAtomicCtor(); - - void TestAssignmentOperators(); - - void TestIsLockFree(); - - void TestStore(); - - void TestLoad(); - - void TestExchange(); - - void TestCompareExchangeWeak(); - - void TestCompareExchangeStrong(); - - void TestAllMemoryOrders(); - -private: - - int nErrorCount = 0; -}; - -void AtomicBoolBasicTest::TestAtomicCtor() -{ - { - AtomicType atomic{ false }; - - BoolType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == false); - 
} - - { - AtomicType atomic{ true }; - - BoolType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == true); - } - - { - AtomicType atomic; - - BoolType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == false); - } - - { - AtomicType atomic{}; - - BoolType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == false); - } -} - -void AtomicBoolBasicTest::TestAssignmentOperators() -{ - { - AtomicType atomic; - - BoolType ret = atomic = true; - - VERIFY(ret == true); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == true); - } -} - -void AtomicBoolBasicTest::TestIsLockFree() -{ - { - AtomicType atomic; - - bool ret = atomic.is_lock_free(); - - VERIFY(ret == true); - - VERIFY(AtomicType::is_always_lock_free == true); - } -} - -void AtomicBoolBasicTest::TestStore() -{ - { - AtomicType atomic; - - atomic.store(true, eastl::memory_order_relaxed); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == true); - } -} - -void AtomicBoolBasicTest::TestLoad() -{ - { - AtomicType atomic; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == false); - - VERIFY(atomic == false); - } - - { - AtomicType atomic{ true }; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == true); - - VERIFY(atomic == true); - } -} - -void AtomicBoolBasicTest::TestExchange() -{ - { - AtomicType atomic; - - BoolType ret = atomic.exchange(false, eastl::memory_order_relaxed); - - VERIFY(ret == false); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == false); - } - - { - AtomicType atomic; - - BoolType ret = atomic.exchange(true, eastl::memory_order_relaxed); - - VERIFY(ret == false); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == true); - } -} - -void AtomicBoolBasicTest::TestCompareExchangeWeak() -{ - { - AtomicType atomic{ false }; - - BoolType observed = false; - bool ret = atomic.compare_exchange_weak(observed, false, eastl::memory_order_relaxed); - - if (ret) - { - VERIFY(ret == true); - VERIFY(observed == false); - VERIFY(atomic.load(eastl::memory_order_relaxed) == false); - } - } - - { - AtomicType atomic{ false }; - - BoolType observed = false; - bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); - - if (ret) - { - VERIFY(ret == true); - VERIFY(observed == false); - VERIFY(atomic.load(eastl::memory_order_relaxed) == true); - } - } - - { - AtomicType atomic{ false }; - - BoolType observed = true; - bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); - - VERIFY(ret == false); - VERIFY(observed == false); - } -} - -void AtomicBoolBasicTest::TestCompareExchangeStrong() -{ - { - AtomicType atomic{ false }; - - BoolType observed = false; - bool ret = atomic.compare_exchange_weak(observed, false, eastl::memory_order_relaxed); - - VERIFY(ret == true); - VERIFY(observed == false); - VERIFY(atomic.load(eastl::memory_order_relaxed) == false); - } - - { - AtomicType atomic{ false }; - - BoolType observed = false; - bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); - - VERIFY(ret == true); - VERIFY(observed == false); - VERIFY(atomic.load(eastl::memory_order_relaxed) == true); - } - - { - AtomicType atomic{ false }; - - BoolType observed = true; - bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); - - VERIFY(ret == false); - VERIFY(observed == false); - } -} - -void AtomicBoolBasicTest::TestAllMemoryOrders() -{ - { - AtomicType atomic; - - atomic.store(true); - - atomic.store(true, eastl::memory_order_relaxed); - - atomic.store(true, 
eastl::memory_order_release); - - atomic.store(true, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - BoolType ret = atomic.load(); - - ret = atomic.load(eastl::memory_order_relaxed); - - ret = atomic.load(eastl::memory_order_acquire); - - ret = atomic.load(eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - BoolType ret = atomic.exchange(true); - - ret = atomic.exchange(true, eastl::memory_order_relaxed); - - ret = atomic.exchange(true, eastl::memory_order_acquire); - - ret = atomic.exchange(true, eastl::memory_order_release); - - ret = atomic.exchange(true, eastl::memory_order_acq_rel); - - ret = atomic.exchange(true, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - BoolType observed = false; - bool ret = atomic.compare_exchange_weak(observed, true); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_release); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - BoolType observed = false; - bool ret = atomic.compare_exchange_strong(observed, true); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_release); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - BoolType observed = false; - bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_release, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic; - - BoolType observed = false; - bool ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_release, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); - - ret = 
atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); - } -} - - -template -class AtomicIntegralBasicTest -{ -public: - - using AtomicType = eastl::atomic; - using IntegralType = T; - - int RunTest() - { - TestAtomicCtor(); - - TestAtomicFetchAdd(); - TestAtomicAddFetch(); - - TestAtomicFetchSub(); - TestAtomicSubFetch(); - - TestAtomicFetchAnd(); - TestAtomicAndFetch(); - - TestAtomicFetchOr(); - TestAtomicOrFetch(); - - TestAtomicFetchXor(); - TestAtomicXorFetch(); - - TestAssignmentOperators(); - - TestIsLockFree(); - - TestStore(); - - TestLoad(); - - TestExchange(); - - TestCompareExchangeWeak(); - - TestCompareExchangeStrong(); - - TestAllMemoryOrders(); - - TestAtomicStandalone(); - - return nErrorCount; - } - -private: - - void TestAtomicCtor(); - - void TestAtomicFetchAdd(); - void TestAtomicAddFetch(); - - void TestAtomicFetchSub(); - void TestAtomicSubFetch(); - - void TestAtomicFetchAnd(); - void TestAtomicAndFetch(); - - void TestAtomicFetchOr(); - void TestAtomicOrFetch(); - - void TestAtomicFetchXor(); - void TestAtomicXorFetch(); - - void TestAssignmentOperators(); - - void TestIsLockFree(); - - void TestStore(); - - void TestLoad(); - - void TestExchange(); - - void TestCompareExchangeWeak(); - - void TestCompareExchangeStrong(); - - void TestAllMemoryOrders(); - - void TestAtomicStandalone(); - -private: - - int nErrorCount = 0; -}; - -template -void AtomicIntegralBasicTest::TestAtomicCtor() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 1); - } - - { - AtomicType atomic{ 20 }; - - IntegralType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 20); - } - - { - AtomicType atomic; - - IntegralType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicFetchAdd() -{ - { - AtomicType atomic; - - IntegralType ret = atomic.fetch_add(1, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic.fetch_add(0, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 5 }; - - IntegralType ret = atomic.fetch_add(0, eastl::memory_order_relaxed); - - VERIFY(ret == 5); - - ret = atomic.fetch_add(4, eastl::memory_order_relaxed); - - VERIFY(ret == 5); - - ret = atomic.fetch_add(1, eastl::memory_order_relaxed); - - VERIFY(ret == 9); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 10); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicAddFetch() -{ - { - AtomicType atomic; - - IntegralType ret = atomic.add_fetch(1, eastl::memory_order_relaxed); - - VERIFY(ret == 1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 1); - } - - { 
- AtomicType atomic; - - IntegralType ret = atomic.add_fetch(0, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 5 }; - - IntegralType ret = atomic.add_fetch(0, eastl::memory_order_relaxed); - - VERIFY(ret == 5); - - ret = atomic.add_fetch(4, eastl::memory_order_relaxed); - - VERIFY(ret == 9); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 9); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicFetchSub() -{ - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic.fetch_sub(1, eastl::memory_order_relaxed); - - VERIFY(ret == 1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic.fetch_sub(0, eastl::memory_order_relaxed); - - VERIFY(ret == 1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 1); - } - - { - AtomicType atomic{ 5 }; - - IntegralType ret = atomic.fetch_sub(2, eastl::memory_order_relaxed); - - VERIFY(ret == 5); - - ret = atomic.fetch_sub(1, eastl::memory_order_relaxed); - - VERIFY(ret == 3); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 2); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicSubFetch() -{ - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic.sub_fetch(1, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic.sub_fetch(0, eastl::memory_order_relaxed); - - VERIFY(ret == 1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 1); - } - - { - AtomicType atomic{ 5 }; - - IntegralType ret = atomic.sub_fetch(2, eastl::memory_order_relaxed); - - VERIFY(ret == 3); - - ret = atomic.sub_fetch(1, eastl::memory_order_relaxed); - - VERIFY(ret == 2); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 2); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicFetchAnd() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.fetch_and(0x0, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.fetch_and(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 0xF }; - - IntegralType ret = atomic.fetch_and(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0xF); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0X1); - } - - { - AtomicType atomic{ 0xF }; - - IntegralType ret = atomic.fetch_and(0xF0, eastl::memory_order_relaxed); - - VERIFY(ret == 0xF); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicAndFetch() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.and_fetch(0x0, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.and_fetch(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 0xF }; - - IntegralType ret = atomic.and_fetch(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - - ret = 
atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } - - { - AtomicType atomic{ 0xF }; - - IntegralType ret = atomic.and_fetch(0xF0, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicFetchOr() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.fetch_or(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic.fetch_or(0x0, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic.fetch_or(0x2, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x3); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicOrFetch() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.or_fetch(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic.or_fetch(0x0, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic.or_fetch(0x2, eastl::memory_order_relaxed); - - VERIFY(ret == 0x3); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x3); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicFetchXor() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.fetch_xor(0x0, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic.fetch_xor(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - } - - { - AtomicType atomic{ 0x0 }; - - IntegralType ret = atomic.fetch_xor(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } -} - -template -void AtomicIntegralBasicTest::TestAtomicXorFetch() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.xor_fetch(0x0, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic.xor_fetch(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - } - - { - AtomicType atomic{ 0x0 }; - - IntegralType ret = atomic.xor_fetch(0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } -} - -template -void AtomicIntegralBasicTest::TestAssignmentOperators() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = (atomic = 5); - - VERIFY(ret == 5); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 5); - } - - { - AtomicType atomic{ 0 }; - - IntegralType ret = ++atomic; - - VERIFY(ret == 1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 1); - } - - { - AtomicType atomic{ 0 }; - - IntegralType 
ret = atomic++; - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 1); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = --atomic; - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic--; - - VERIFY(ret == 1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic += 5; - - VERIFY(ret == 5); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 5); - } - - { - AtomicType atomic{ 5 }; - - IntegralType ret = atomic -= 3; - - VERIFY(ret == 2); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 2); - } - - { - AtomicType atomic{ 0x0 }; - - IntegralType ret = atomic |= 0x1; - - VERIFY(ret == 0x1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic &= 0x1; - - VERIFY(ret == 0x1); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic ^= 0x1; - - VERIFY(ret == 0x0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - } -} - -template -void AtomicIntegralBasicTest::TestIsLockFree() -{ - { - const AtomicType atomic{ 5 }; - - VERIFY(atomic.is_lock_free() == true); - - VERIFY(AtomicType::is_always_lock_free == true); - } -} - -template -void AtomicIntegralBasicTest::TestStore() -{ - { - AtomicType atomic{ 0 }; - - atomic.store(0, eastl::memory_order_relaxed); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); - } - - { - AtomicType atomic{ 0 }; - - atomic.store(1, eastl::memory_order_relaxed); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } -} - -template -void AtomicIntegralBasicTest::TestLoad() -{ - { - AtomicType atomic{ 0 }; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); - - bool ret = atomic == 0; - VERIFY(ret == true); - - VERIFY(atomic == 0); - } - - { - AtomicType atomic{ 5 }; - - VERIFY(atomic.load(eastl::memory_order_relaxed) == 5); - - bool ret = atomic == 5; - VERIFY(ret == true); - - VERIFY(atomic == 5); - } -} - -template -void AtomicIntegralBasicTest::TestExchange() -{ - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.exchange(0, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic{ 0 }; - - IntegralType ret = atomic.exchange(1, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - - ret = atomic.load(eastl::memory_order_relaxed); - - VERIFY(ret == 1); - } -} - -template -void AtomicIntegralBasicTest::TestCompareExchangeWeak() -{ - { - AtomicType atomic{ 0 }; - - IntegralType observed = 0; - bool ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed); - - if (ret == true) - { - VERIFY(ret == true); - VERIFY(observed == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - } - - { - AtomicType atomic{ 0 }; - - IntegralType observed = 1; - bool ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed); - - VERIFY(ret == false); - VERIFY(observed == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); - } -} - -template -void AtomicIntegralBasicTest::TestCompareExchangeStrong() -{ - { - AtomicType atomic{ 0 }; - - IntegralType observed = 0; - bool ret = atomic.compare_exchange_strong(observed, 1, 
eastl::memory_order_relaxed); - - VERIFY(ret == true); - VERIFY(observed == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic{ 0 }; - - IntegralType observed = 1; - bool ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_relaxed); - - VERIFY(ret == false); - VERIFY(observed == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); - } -} - -template -void AtomicIntegralBasicTest::TestAllMemoryOrders() -{ - { - AtomicType atomic{}; - - atomic.store(1); - - atomic.store(1, eastl::memory_order_relaxed); - - atomic.store(1, eastl::memory_order_release); - - atomic.store(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.load(); - - ret = atomic.load(eastl::memory_order_relaxed); - - ret = atomic.load(eastl::memory_order_acquire); - - ret = atomic.load(eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.exchange(1); - - ret = atomic.exchange(1, eastl::memory_order_relaxed); - - ret = atomic.exchange(1, eastl::memory_order_acquire); - - ret = atomic.exchange(1, eastl::memory_order_release); - - ret = atomic.exchange(1, eastl::memory_order_acq_rel); - - ret = atomic.exchange(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.fetch_add(1); - - ret = atomic.fetch_add(1, eastl::memory_order_relaxed); - - ret = atomic.fetch_add(1, eastl::memory_order_acquire); - - ret = atomic.fetch_add(1, eastl::memory_order_release); - - ret = atomic.fetch_add(1, eastl::memory_order_acq_rel); - - ret = atomic.fetch_add(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.add_fetch(1); - - ret = atomic.add_fetch(1, eastl::memory_order_relaxed); - - ret = atomic.add_fetch(1, eastl::memory_order_acquire); - - ret = atomic.add_fetch(1, eastl::memory_order_release); - - ret = atomic.add_fetch(1, eastl::memory_order_acq_rel); - - ret = atomic.add_fetch(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.fetch_sub(1); - - ret = atomic.fetch_sub(1, eastl::memory_order_relaxed); - - ret = atomic.fetch_sub(1, eastl::memory_order_acquire); - - ret = atomic.fetch_sub(1, eastl::memory_order_release); - - ret = atomic.fetch_sub(1, eastl::memory_order_acq_rel); - - ret = atomic.fetch_sub(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.sub_fetch(1); - - ret = atomic.sub_fetch(1, eastl::memory_order_relaxed); - - ret = atomic.sub_fetch(1, eastl::memory_order_acquire); - - ret = atomic.sub_fetch(1, eastl::memory_order_release); - - ret = atomic.sub_fetch(1, eastl::memory_order_acq_rel); - - ret = atomic.sub_fetch(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.fetch_and(1); - - ret = atomic.fetch_and(1, eastl::memory_order_relaxed); - - ret = atomic.fetch_and(1, eastl::memory_order_acquire); - - ret = atomic.fetch_and(1, eastl::memory_order_release); - - ret = atomic.fetch_and(1, eastl::memory_order_acq_rel); - - ret = atomic.fetch_and(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.and_fetch(1); - - ret = atomic.and_fetch(1, eastl::memory_order_relaxed); - - ret = atomic.and_fetch(1, eastl::memory_order_acquire); - - ret = atomic.and_fetch(1, eastl::memory_order_release); - - ret = atomic.and_fetch(1, eastl::memory_order_acq_rel); - - ret = atomic.and_fetch(1, eastl::memory_order_seq_cst); - } - - { - AtomicType 
atomic{}; - - IntegralType ret = atomic.fetch_or(1); - - ret = atomic.fetch_or(1, eastl::memory_order_relaxed); - - ret = atomic.fetch_or(1, eastl::memory_order_acquire); - - ret = atomic.fetch_or(1, eastl::memory_order_release); - - ret = atomic.fetch_or(1, eastl::memory_order_acq_rel); - - ret = atomic.fetch_or(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.or_fetch(1); - - ret = atomic.or_fetch(1, eastl::memory_order_relaxed); - - ret = atomic.or_fetch(1, eastl::memory_order_acquire); - - ret = atomic.or_fetch(1, eastl::memory_order_release); - - ret = atomic.or_fetch(1, eastl::memory_order_acq_rel); - - ret = atomic.or_fetch(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.fetch_xor(1); - - ret = atomic.fetch_xor(1, eastl::memory_order_relaxed); - - ret = atomic.fetch_xor(1, eastl::memory_order_acquire); - - ret = atomic.fetch_xor(1, eastl::memory_order_release); - - ret = atomic.fetch_xor(1, eastl::memory_order_acq_rel); - - ret = atomic.fetch_xor(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType ret = atomic.xor_fetch(1); - - ret = atomic.xor_fetch(1, eastl::memory_order_relaxed); - - ret = atomic.xor_fetch(1, eastl::memory_order_acquire); - - ret = atomic.xor_fetch(1, eastl::memory_order_release); - - ret = atomic.xor_fetch(1, eastl::memory_order_acq_rel); - - ret = atomic.xor_fetch(1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType observed = 0; - bool ret; - - ret = atomic.compare_exchange_weak(observed, 1); - - ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_release); - - ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType observed = 0; - bool ret; - - ret = atomic.compare_exchange_strong(observed, 1); - - ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_release); - - ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_acq_rel); - - ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType observed = 0; - bool ret; - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_relaxed, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_acquire, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_acquire, - eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_release, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_acq_rel, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_acq_rel, - eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_seq_cst, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_seq_cst, - 
eastl::memory_order_acquire); - - ret = atomic.compare_exchange_weak(observed, 1, - eastl::memory_order_seq_cst, - eastl::memory_order_seq_cst); - } - - { - AtomicType atomic{}; - - IntegralType observed = 0; - bool ret; - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_relaxed, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_acquire, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_acquire, - eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_release, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_acq_rel, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_acq_rel, - eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_seq_cst, - eastl::memory_order_relaxed); - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_seq_cst, - eastl::memory_order_acquire); - - ret = atomic.compare_exchange_strong(observed, 1, - eastl::memory_order_seq_cst, - eastl::memory_order_seq_cst); - } - -} - -template -void AtomicIntegralBasicTest::TestAtomicStandalone() -{ - { - AtomicType atomic; - - IntegralType expected = 0; - bool ret = atomic_compare_exchange_weak(&atomic, &expected, 1); - - if (ret) - { - VERIFY(ret == true); - - VERIFY(expected == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - } - - { - AtomicType atomic; - - IntegralType expected = 0; - bool ret = atomic_compare_exchange_weak_explicit(&atomic, &expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - if (ret) - { - VERIFY(ret == true); - - VERIFY(expected == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - } - - { - AtomicType atomic; - - IntegralType expected = 0; - bool ret = atomic_compare_exchange_strong(&atomic, &expected, 1); - - VERIFY(ret == true); - - VERIFY(expected == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - IntegralType expected = 0; - bool ret = atomic_compare_exchange_strong_explicit(&atomic, &expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed); - - VERIFY(ret == true); - - VERIFY(expected == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_fetch_xor(&atomic, 0x1); - - VERIFY(ret == 0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_fetch_xor_explicit(&atomic, 0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_xor_fetch(&atomic, 0x1); - - VERIFY(ret == 0x1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_xor_fetch_explicit(&atomic, 0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_fetch_or(&atomic, 0x1); - - VERIFY(ret == 0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_fetch_or_explicit(&atomic, 0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - 
VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_or_fetch(&atomic, 0x1); - - VERIFY(ret == 0x1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_or_fetch_explicit(&atomic, 0x1, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic_fetch_and(&atomic, 0x0); - - VERIFY(ret == 0x1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic_fetch_and_explicit(&atomic, 0x0, eastl::memory_order_relaxed); - - VERIFY(ret == 0x1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic_and_fetch(&atomic, 0x0); - - VERIFY(ret == 0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0); - } - - { - AtomicType atomic{ 0x1 }; - - IntegralType ret = atomic_and_fetch_explicit(&atomic, 0x0, eastl::memory_order_relaxed); - - VERIFY(ret == 0x0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic_fetch_sub(&atomic, 1); - - VERIFY(ret == 1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic_fetch_sub_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == 1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic_sub_fetch(&atomic, 1); - - VERIFY(ret == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); - } - - { - AtomicType atomic{ 1 }; - - IntegralType ret = atomic_sub_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_fetch_add(&atomic, 1); - - VERIFY(ret == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_fetch_add_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_add_fetch(&atomic, 1); - - VERIFY(ret == 1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_add_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == 1); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_exchange(&atomic, 1); - - VERIFY(ret == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_exchange_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_load(&atomic); - - VERIFY(ret == 0); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_load_explicit(&atomic, eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_load_cond(&atomic, [](IntegralType val) { return true; }); - - VERIFY(ret == 0); - } - - { - AtomicType atomic; - - IntegralType ret = atomic_load_cond_explicit(&atomic, [](IntegralType val) { return true; }, 
eastl::memory_order_relaxed); - - VERIFY(ret == 0); - } - - { - AtomicType atomic; - - atomic_store(&atomic, 1); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - atomic_store_explicit(&atomic, 1, eastl::memory_order_relaxed); - - VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); - } - - { - AtomicType atomic; - - VERIFY(atomic_is_lock_free(&atomic) == true); - } -} - -int TestAtomicBasic() -{ - int nErrorCount = 0; - - #if defined(EASTL_ATOMIC_HAS_8BIT) - { - AtomicIntegralBasicTest u8AtomicTest; - - nErrorCount += u8AtomicTest.RunTest(); - } - #endif - - #if defined(EASTL_ATOMIC_HAS_16BIT) - { - AtomicIntegralBasicTest u16AtomicTest; - - nErrorCount += u16AtomicTest.RunTest(); - } - #endif - - #if defined(EASTL_ATOMIC_HAS_32BIT) - { - AtomicIntegralBasicTest u32AtomicTest; - - nErrorCount += u32AtomicTest.RunTest(); - } - #endif - - #if defined(EASTL_ATOMIC_HAS_64BIT) - { - AtomicIntegralBasicTest u64AtomicTest; - - nErrorCount += u64AtomicTest.RunTest(); - } - #endif - - #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) - { - AtomicIntegralBasicTest<__uint128_t> u128AtomicTest; - - nErrorCount += u128AtomicTest.RunTest(); - } - - { - AtomicIntegralBasicTest u128AtomicTest; - - nErrorCount += u128AtomicTest.RunTest(); - } - #endif - - { - AtomicBoolBasicTest boolAtomicTest; - - nErrorCount += boolAtomicTest.RunTest(); - } - - #if defined(EASTL_ATOMIC_HAS_16BIT) - { - AtomicUserTypeBasicTest userTypeAtomicTest; - - nErrorCount += userTypeAtomicTest.RunTest(); - } - #endif - - #if defined(EASTL_ATOMIC_HAS_128BIT) - { - AtomicUserTypeBasicTest userTypeAtomicTest; - - nErrorCount += userTypeAtomicTest.RunTest(); - } - #endif - - { - AtomicPointerBasicTest ptrAtomicTest; - - nErrorCount += ptrAtomicTest.RunTest(); - } - - { - AtomicVoidPointerBasicTest voidPtrAtomicTest; - - nErrorCount += voidPtrAtomicTest.RunTest(); - } - - { - AtomicFlagBasicTest atomicFlagBasicTest; - - nErrorCount += atomicFlagBasicTest.RunTest(); - } - - { - AtomicStandaloneBasicTest atomicStandaloneBasicTest; - - nErrorCount += atomicStandaloneBasicTest.RunTest(); - } - - return nErrorCount; -} +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLTest.h" + +#include + + +/** + * This is a basic test suite that tests all functionality is implemented + * and that all operations do as expected. + * I.E. 
fetch_add returns the previous value and add_fetch returns the current value + */ + +class AtomicStandaloneBasicTest +{ +public: + + int RunTest() + { + AtomicSignalFence(); + + AtomicThreadFence(); + + AtomicCpuPause(); + + AtomicCompilerBarrier(); + + return nErrorCount; + } + +private: + + void AtomicSignalFence(); + + void AtomicThreadFence(); + + void AtomicCpuPause(); + + void AtomicCompilerBarrier(); + +private: + + int nErrorCount = 0; +}; + +void AtomicStandaloneBasicTest::AtomicSignalFence() +{ + eastl::atomic_signal_fence(eastl::memory_order_relaxed); + + eastl::atomic_signal_fence(eastl::memory_order_acquire); + + eastl::atomic_signal_fence(eastl::memory_order_release); + + eastl::atomic_signal_fence(eastl::memory_order_acq_rel); + + eastl::atomic_signal_fence(eastl::memory_order_seq_cst); +} + +void AtomicStandaloneBasicTest::AtomicThreadFence() +{ + eastl::atomic_thread_fence(eastl::memory_order_relaxed); + + eastl::atomic_thread_fence(eastl::memory_order_acquire); + + eastl::atomic_thread_fence(eastl::memory_order_release); + + eastl::atomic_thread_fence(eastl::memory_order_acq_rel); + + eastl::atomic_thread_fence(eastl::memory_order_seq_cst); +} + +void AtomicStandaloneBasicTest::AtomicCpuPause() +{ + eastl::cpu_pause(); +} + +void AtomicStandaloneBasicTest::AtomicCompilerBarrier() +{ + eastl::compiler_barrier(); + + { + bool ret = false; + eastl::compiler_barrier_data_dependency(ret); + } +} + +class AtomicFlagBasicTest +{ +public: + + using AtomicType = eastl::atomic_flag; + using BoolType = bool; + + int RunTest() + { + TestAtomicFlagCtor(); + + TestAtomicFlagClear(); + + TestAtomicFlagTestAndSet(); + + TestAtomicFlagTest(); + + TestAllMemoryOrders(); + + TestAtomicFlagStandalone(); + + return nErrorCount; + } + +private: + + void TestAtomicFlagCtor(); + + void TestAtomicFlagClear(); + + void TestAtomicFlagTestAndSet(); + + void TestAtomicFlagTest(); + + void TestAllMemoryOrders(); + + void TestAtomicFlagStandalone(); + +private: + + int nErrorCount = 0; +}; + +void AtomicFlagBasicTest::TestAtomicFlagCtor() +{ + { + AtomicType atomic; + + VERIFY(atomic.test(eastl::memory_order_relaxed) == false); + } + + { + AtomicType atomic{ false }; + + VERIFY(atomic.test(eastl::memory_order_relaxed) == false); + } + + { + AtomicType atomic{ true }; + + VERIFY(atomic.test(eastl::memory_order_relaxed) == true); + } +} + +void AtomicFlagBasicTest::TestAtomicFlagClear() +{ + { + AtomicType atomic; + + atomic.clear(eastl::memory_order_relaxed); + + VERIFY(atomic.test(eastl::memory_order_relaxed) == false); + } + + { + AtomicType atomic{ true }; + + atomic.clear(eastl::memory_order_relaxed); + + VERIFY(atomic.test(eastl::memory_order_relaxed) == false); + } +} + +void AtomicFlagBasicTest::TestAtomicFlagTestAndSet() +{ + { + AtomicType atomic; + + BoolType ret = atomic.test_and_set(eastl::memory_order_relaxed); + + VERIFY(ret == false); + + VERIFY(atomic.test(eastl::memory_order_relaxed) == true); + } + + { + AtomicType atomic{ true }; + + BoolType ret = atomic.test_and_set(eastl::memory_order_relaxed); + + VERIFY(ret == true); + + VERIFY(atomic.test(eastl::memory_order_relaxed) == true); + } +} + +void AtomicFlagBasicTest::TestAtomicFlagTest() +{ + { + AtomicType atomic; + + VERIFY(atomic.test(eastl::memory_order_relaxed) == false); + } + + { + AtomicType atomic{ true }; + + VERIFY(atomic.test(eastl::memory_order_relaxed) == true); + } +} + +void AtomicFlagBasicTest::TestAllMemoryOrders() +{ + { + AtomicType atomic; + + atomic.clear(); + + atomic.clear(eastl::memory_order_relaxed); + + 
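+	// clear() is a pure store, so only the store-compatible orderings
+	// (relaxed, release, seq_cst) are exercised for it; test_and_set()
+	// below is a read-modify-write and is exercised with every ordering.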
atomic.clear(eastl::memory_order_release); + + atomic.clear(eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + atomic.test_and_set(); + + atomic.test_and_set(eastl::memory_order_relaxed); + + atomic.test_and_set(eastl::memory_order_acquire); + + atomic.test_and_set(eastl::memory_order_release); + + atomic.test_and_set(eastl::memory_order_acq_rel); + + atomic.test_and_set(eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + BoolType ret = atomic.test(); + + ret = atomic.test(eastl::memory_order_relaxed); + + ret = atomic.test(eastl::memory_order_acquire); + + ret = atomic.test(eastl::memory_order_seq_cst); + } +} + +void AtomicFlagBasicTest::TestAtomicFlagStandalone() +{ + { + AtomicType atomic; + + BoolType ret = atomic_flag_test_and_set(&atomic); + + ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_relaxed); + + ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_acquire); + + ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_release); + + ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_acq_rel); + + ret = atomic_flag_test_and_set_explicit(&atomic, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + atomic_flag_clear(&atomic); + + atomic_flag_clear_explicit(&atomic, eastl::memory_order_relaxed); + + atomic_flag_clear_explicit(&atomic, eastl::memory_order_release); + + atomic_flag_clear_explicit(&atomic, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + BoolType ret = atomic_flag_test(&atomic); + + ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_relaxed); + + ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_acquire); + + ret = atomic_flag_test_explicit(&atomic, eastl::memory_order_seq_cst); + } +} + +class AtomicVoidPointerBasicTest +{ +public: + + using AtomicType = eastl::atomic; + using PtrType = void*; + + int RunTest() + { + TestAtomicCtor(); + + TestAssignmentOperators(); + + TestIsLockFree(); + + TestStore(); + + TestLoad(); + + TestExchange(); + + TestCompareExchangeWeak(); + + TestCompareExchangeStrong(); + + TestAllMemoryOrders(); + + return nErrorCount; + } + +private: + + void TestAtomicCtor(); + + void TestAssignmentOperators(); + + void TestIsLockFree(); + + void TestStore(); + + void TestLoad(); + + void TestExchange(); + + void TestCompareExchangeWeak(); + + void TestCompareExchangeStrong(); + + void TestAllMemoryOrders(); + +private: + + int nErrorCount = 0; +}; + +void AtomicVoidPointerBasicTest::TestAtomicCtor() +{ + { + AtomicType atomic; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } + + { + AtomicType atomic{ (PtrType)0x04 }; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x04); + } +} + +void AtomicVoidPointerBasicTest::TestAssignmentOperators() +{ + { + AtomicType atomic; + + PtrType ret = atomic = (PtrType)0x04; + + VERIFY(ret == (PtrType)0x04); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x04); + } + + { + AtomicType atomic; + + PtrType ret = atomic = (PtrType)0x0; + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } +} + +void AtomicVoidPointerBasicTest::TestIsLockFree() +{ + { + AtomicType atomic; + + VERIFY(atomic.is_lock_free() == true); + + VERIFY(atomic.is_always_lock_free == true); + } +} + +void AtomicVoidPointerBasicTest::TestStore() +{ + { + PtrType val = (PtrType)0x0; + AtomicType atomic; + + atomic.store(val, eastl::memory_order_relaxed); + + 
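+		// the load below is sequenced after the store on the same thread, so
+		// even with relaxed ordering it must observe the value just stored.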
VERIFY(atomic.load(eastl::memory_order_relaxed) == val); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic; + + atomic.store(val, eastl::memory_order_relaxed); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == val); + } +} + +void AtomicVoidPointerBasicTest::TestLoad() +{ + { + AtomicType atomic{ (PtrType)0x4 }; + + PtrType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic == (PtrType)0x4); + } +} + +void AtomicVoidPointerBasicTest::TestExchange() +{ + { + AtomicType atomic; + + PtrType ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } +} + +void AtomicVoidPointerBasicTest::TestCompareExchangeWeak() +{ + { + AtomicType atomic; + + PtrType observed = (PtrType)0x0; + bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + if (ret) + { + VERIFY(ret == true); + VERIFY(observed == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + } + + { + AtomicType atomic; + + PtrType observed = (PtrType)0x4; + bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + VERIFY(ret == false); + VERIFY(observed == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } +} + +void AtomicVoidPointerBasicTest::TestCompareExchangeStrong() +{ + { + AtomicType atomic; + + PtrType observed = (PtrType)0x0; + bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + VERIFY(ret == true); + VERIFY(observed == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType observed = (PtrType)0x4; + bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + VERIFY(ret == false); + VERIFY(observed == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } +} + +void AtomicVoidPointerBasicTest::TestAllMemoryOrders() +{ + { + AtomicType atomic; + PtrType val = (PtrType)0x4; + + atomic.store(val); + + atomic.store(val, eastl::memory_order_relaxed); + + atomic.store(val, eastl::memory_order_release); + + atomic.store(val, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + PtrType ret = atomic.load(); + + ret = atomic.load(eastl::memory_order_relaxed); + + ret = atomic.load(eastl::memory_order_acquire); + + ret = atomic.load(eastl::memory_order_seq_cst); + + ret = atomic.load(eastl::memory_order_read_depends); + } + + { + AtomicType atomic; + + PtrType ret = atomic.exchange((PtrType)0x4); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_relaxed); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acquire); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acq_rel); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + PtrType observed = (PtrType)0x0; + + bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, 
eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + PtrType observed = (PtrType)0x0; + + bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + PtrType observed = (PtrType)0x0; + bool ret; + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + PtrType observed = (PtrType)0x0; + bool ret; + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); + } +} + +class AtomicPointerBasicTest +{ +public: + + using AtomicType = eastl::atomic; + using PtrType = uint32_t*; + + int RunTest() + { + TestAtomicCtor(); + + TestAssignmentOperators(); + + TestIsLockFree(); + + TestStore(); + + TestLoad(); + + TestExchange(); + + TestCompareExchangeWeak(); + + TestCompareExchangeStrong(); + + TestAllMemoryOrders(); + + TestFetchAdd(); + TestAddFetch(); + + TestFetchSub(); + TestSubFetch(); + + 
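+		// pointer arithmetic on eastl::atomic<uint32_t*> steps in units of the
+		// pointed-to type, so fetch_add(1)/add_fetch(1) advance the stored
+		// address by sizeof(uint32_t); the arithmetic tests above and the
+		// standalone tests below both rely on this.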
TestAtomicPointerStandalone(); + + return nErrorCount; + } + +private: + + void TestAtomicCtor(); + + void TestAssignmentOperators(); + + void TestIsLockFree(); + + void TestStore(); + + void TestLoad(); + + void TestExchange(); + + void TestCompareExchangeWeak(); + + void TestCompareExchangeStrong(); + + void TestAllMemoryOrders(); + + void TestFetchAdd(); + void TestAddFetch(); + + void TestFetchSub(); + void TestSubFetch(); + + void TestAtomicPointerStandalone(); + +private: + + int nErrorCount = 0; +}; + +void AtomicPointerBasicTest::TestAtomicCtor() +{ + { + AtomicType atomic{}; + + PtrType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == nullptr); + } + + { + AtomicType atomic{ (PtrType)0x4 }; + + PtrType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + } +} + +void AtomicPointerBasicTest::TestAssignmentOperators() +{ + { + PtrType val = (PtrType)0x4; + AtomicType atomic{val}; + + PtrType expected = (PtrType)0x8; + + PtrType ret = atomic = expected; + + VERIFY(ret == expected); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + PtrType val = (PtrType)0x0; + AtomicType atomic{val}; + + PtrType ret = atomic = val; + + VERIFY(ret == val); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == val); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{val}; + + PtrType expected = (PtrType)0x8; + PtrType ret = ++atomic; + + VERIFY(ret == expected); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + PtrType val = (PtrType)0x4; + + + AtomicType atomic{val}; + + PtrType expected = (PtrType)0x8; + PtrType ret = atomic++; + + VERIFY(ret == val); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{val}; + + PtrType expected = (PtrType)0x10; + PtrType ret = atomic += 3; + + VERIFY(ret == expected); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{val}; + + PtrType expected = (PtrType)0x4; + PtrType ret = atomic += 0; + + VERIFY(ret == expected); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{val}; + + PtrType expected = (PtrType)0x0; + PtrType ret = atomic -= 1; + + VERIFY(ret == expected); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{val}; + + PtrType expected = (PtrType)0x4; + PtrType ret = atomic -= 0; + + VERIFY(ret == expected); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } +} + +void AtomicPointerBasicTest::TestIsLockFree() +{ + { + AtomicType atomic; + + VERIFY(atomic.is_lock_free() == true); + + VERIFY(atomic.is_always_lock_free == true); + } +} + +void AtomicPointerBasicTest::TestStore() +{ + { + PtrType val = (PtrType)0x0; + AtomicType atomic; + + atomic.store(val, eastl::memory_order_relaxed); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == val); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic; + + atomic.store(val, eastl::memory_order_relaxed); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == val); + } +} + +void AtomicPointerBasicTest::TestLoad() +{ + { + AtomicType atomic{ (PtrType)0x4 }; + + PtrType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic == (PtrType)0x4); + } +} + +void AtomicPointerBasicTest::TestCompareExchangeWeak() +{ + { + AtomicType atomic; + + PtrType 
observed = (PtrType)0x0; + bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + if (ret) + { + VERIFY(ret == true); + VERIFY(observed == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + } + + { + AtomicType atomic; + + PtrType observed = (PtrType)0x4; + bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + VERIFY(ret == false); + VERIFY(observed == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } +} + +void AtomicPointerBasicTest::TestCompareExchangeStrong() +{ + { + AtomicType atomic; + + PtrType observed = (PtrType)0x0; + bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + VERIFY(ret == true); + VERIFY(observed == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType observed = (PtrType)0x4; + bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + VERIFY(ret == false); + VERIFY(observed == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } +} + +void AtomicPointerBasicTest::TestExchange() +{ + { + AtomicType atomic; + + PtrType ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } +} + +void AtomicPointerBasicTest::TestAllMemoryOrders() +{ + { + AtomicType atomic; + PtrType val = (PtrType)0x4; + + atomic.store(val); + + atomic.store(val, eastl::memory_order_relaxed); + + atomic.store(val, eastl::memory_order_release); + + atomic.store(val, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + PtrType ret = atomic.load(); + + ret = atomic.load(eastl::memory_order_relaxed); + + ret = atomic.load(eastl::memory_order_acquire); + + ret = atomic.load(eastl::memory_order_seq_cst); + + ret = atomic.load(eastl::memory_order_read_depends); + } + + { + AtomicType atomic; + + PtrType ret = atomic.fetch_add(0); + + ret = atomic.fetch_add(0, eastl::memory_order_relaxed); + + ret = atomic.fetch_add(0, eastl::memory_order_acquire); + + ret = atomic.fetch_add(0, eastl::memory_order_release); + + ret = atomic.fetch_add(0, eastl::memory_order_acq_rel); + + ret = atomic.fetch_add(0, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + PtrType ret = atomic.fetch_sub(0); + + ret = atomic.fetch_sub(0, eastl::memory_order_relaxed); + + ret = atomic.fetch_sub(0, eastl::memory_order_acquire); + + ret = atomic.fetch_sub(0, eastl::memory_order_release); + + ret = atomic.fetch_sub(0, eastl::memory_order_acq_rel); + + ret = atomic.fetch_sub(0, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + PtrType ret = atomic.add_fetch(0); + + ret = atomic.add_fetch(0, eastl::memory_order_relaxed); + + ret = atomic.add_fetch(0, eastl::memory_order_acquire); + + ret = atomic.add_fetch(0, eastl::memory_order_release); + + ret = atomic.add_fetch(0, eastl::memory_order_acq_rel); + + ret = atomic.add_fetch(0, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + PtrType ret = atomic.sub_fetch(0); + + ret = atomic.sub_fetch(0, eastl::memory_order_relaxed); + + ret = atomic.sub_fetch(0, eastl::memory_order_acquire); + + ret = atomic.sub_fetch(0, eastl::memory_order_release); + + ret = atomic.sub_fetch(0, eastl::memory_order_acq_rel); + + ret = atomic.sub_fetch(0, eastl::memory_order_seq_cst); + } + + { + 
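+		// exchange and the compare_exchange variants that follow are
+		// read-modify-write operations, so every memory ordering is valid here.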
AtomicType atomic; + + PtrType ret = atomic.exchange((PtrType)0x4); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_relaxed); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acquire); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_release); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_acq_rel); + + ret = atomic.exchange((PtrType)0x4, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + PtrType observed = (PtrType)0x0; + + bool ret = atomic.compare_exchange_weak(observed, (PtrType)0x4); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + PtrType observed = (PtrType)0x0; + + bool ret = atomic.compare_exchange_strong(observed, (PtrType)0x4); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + PtrType observed = (PtrType)0x0; + bool ret; + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + PtrType observed = (PtrType)0x0; + bool ret; + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acquire, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_release, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, 
(PtrType)0x4, eastl::memory_order_acq_rel, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, (PtrType)0x4, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); + } +} + +void AtomicPointerBasicTest::TestFetchAdd() +{ + { + PtrType val = (PtrType)0x4; + AtomicType atomic{ val }; + + PtrType ret = atomic.fetch_add(1, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x8); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{ val }; + + PtrType ret = atomic.fetch_add(0, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } +} + +void AtomicPointerBasicTest::TestAddFetch() +{ + { + PtrType val = (PtrType)0x4; + AtomicType atomic{ val }; + + PtrType ret = atomic.add_fetch(1, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x8); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x8); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{ val }; + + PtrType ret = atomic.add_fetch(0, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } +} + +void AtomicPointerBasicTest::TestFetchSub() +{ + { + PtrType val = (PtrType)0x4; + AtomicType atomic{ val }; + + PtrType ret = atomic.fetch_sub(1, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{ val }; + + PtrType ret = atomic.fetch_sub(0, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } +} + +void AtomicPointerBasicTest::TestSubFetch() +{ + { + PtrType val = (PtrType)0x4; + AtomicType atomic{ val }; + + PtrType ret = atomic.sub_fetch(1, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } + + { + PtrType val = (PtrType)0x4; + AtomicType atomic{ val }; + + PtrType ret = atomic.sub_fetch(0, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } +} + +void AtomicPointerBasicTest::TestAtomicPointerStandalone() +{ + { + AtomicType atomic; + + VERIFY(atomic_is_lock_free(&atomic) == true); + } + + { + AtomicType atomic; + PtrType val = (PtrType)0x4; + + atomic_store(&atomic, val); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == val); + } + + { + AtomicType atomic; + PtrType val = (PtrType)0x4; + + atomic_store_explicit(&atomic, val, eastl::memory_order_relaxed); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == val); + } + + { + AtomicType atomic; + + PtrType ret = atomic_load(&atomic); + + VERIFY(ret == (PtrType)0x0); + } + + { + AtomicType atomic; + + PtrType ret = atomic_load_explicit(&atomic, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x0); + } + + { + AtomicType atomic; + + PtrType ret = atomic_load_cond(&atomic, [](PtrType val) { return true; }); + + VERIFY(ret == (PtrType)0x0); + } + + { + AtomicType atomic; + + PtrType ret = atomic_load_cond_explicit(&atomic, [](PtrType val) { return 
true; }, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x0); + } + + { + AtomicType atomic; + + PtrType ret = atomic_exchange(&atomic, (PtrType)0x4); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType ret = atomic_exchange_explicit(&atomic, (PtrType)0x4, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType ret = atomic_add_fetch(&atomic, 1); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType ret = atomic_add_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType ret = atomic_fetch_add(&atomic, 1); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType ret = atomic_fetch_add_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic{ (PtrType)0x4 }; + + PtrType ret = atomic_fetch_sub(&atomic, 1); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } + + { + AtomicType atomic{ (PtrType)0x4 }; + + PtrType ret = atomic_fetch_sub_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x4); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } + + { + AtomicType atomic{ (PtrType)0x4 }; + + PtrType ret = atomic_sub_fetch(&atomic, 1); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } + + { + AtomicType atomic{ (PtrType)0x4 }; + + PtrType ret = atomic_sub_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == (PtrType)0x0); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x0); + } + + { + AtomicType atomic; + + PtrType expected = (PtrType)0x0; + bool ret = atomic_compare_exchange_strong(&atomic, &expected, (PtrType)0x4); + + VERIFY(ret == true); + + VERIFY(expected == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType expected = (PtrType)0x0; + bool ret = atomic_compare_exchange_strong_explicit(&atomic, &expected, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + VERIFY(ret == true); + + VERIFY(expected == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + + { + AtomicType atomic; + + PtrType expected = (PtrType)0x0; + bool ret = atomic_compare_exchange_weak(&atomic, &expected, (PtrType)0x4); + + if (ret) + { + VERIFY(ret == true); + + VERIFY(expected == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + } + + { + AtomicType atomic; + + PtrType expected = (PtrType)0x0; + bool ret = atomic_compare_exchange_weak_explicit(&atomic, &expected, (PtrType)0x4, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + if (ret) + { + VERIFY(ret == true); + + VERIFY(expected == (PtrType)0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == (PtrType)0x4); + } + } +} + +struct AtomicNonTriviallyConstructible +{ + AtomicNonTriviallyConstructible() + : a(0) + , b(0) + 
{ + } + + AtomicNonTriviallyConstructible(uint16_t a, uint16_t b) + : a(a) + , b(b) + { + } + + friend bool operator==(const AtomicNonTriviallyConstructible& a, const AtomicNonTriviallyConstructible& b) + { + return a.a == b.a && a.b == b.b; + } + + uint16_t a; + uint16_t b; +}; + +struct AtomicNonTriviallyConstructibleNoExcept +{ + AtomicNonTriviallyConstructibleNoExcept() noexcept + : a(0) + , b(0) + { + } + + AtomicNonTriviallyConstructibleNoExcept(uint16_t a, uint16_t b) noexcept + : a(a) + , b(b) + { + } + + friend bool operator==(const AtomicNonTriviallyConstructibleNoExcept& a, const AtomicNonTriviallyConstructibleNoExcept& b) + { + return a.a == b.a && a.b == b.b; + } + + uint16_t a; + uint16_t b; +}; + +struct AtomicUserType16 +{ + uint8_t a; + uint8_t b; + + friend bool operator==(const AtomicUserType16& a, const AtomicUserType16& b) + { + return (a.a == b.a) && (a.b == b.b); + } +}; + +struct AtomicUserType128 +{ + uint32_t a; + uint32_t b; + uint32_t c; + uint32_t d; + + AtomicUserType128() = default; + + AtomicUserType128(const AtomicUserType128&) = default; + + AtomicUserType128(uint32_t a, uint32_t b) + : a(a) + , b(b) + , c(0) + , d(0) + { + } + + AtomicUserType128& operator=(const AtomicUserType128&) = default; + + friend bool operator==(const AtomicUserType128& a, const AtomicUserType128& b) + { + return (a.a == b.a) && (a.b == b.b) && (a.c == b.c) && (a.d == b.d); + } +}; + +template +class AtomicUserTypeBasicTest +{ +public: + + using AtomicType = eastl::atomic; + using UserType = T; + + int RunTest() + { + TestAtomicCtor(); + + TestAssignmentOperators(); + + TestIsLockFree(); + + TestStore(); + + TestLoad(); + + TestExchange(); + + TestCompareExchangeWeak(); + + TestCompareExchangeStrong(); + + TestAllMemoryOrders(); + + return nErrorCount; + } + +private: + + void TestAtomicCtor(); + + void TestAssignmentOperators(); + + void TestIsLockFree(); + + void TestStore(); + + void TestLoad(); + + void TestExchange(); + + void TestCompareExchangeWeak(); + + void TestCompareExchangeStrong(); + + void TestAllMemoryOrders(); + +private: + + int nErrorCount = 0; +}; + +template +void AtomicUserTypeBasicTest::TestAtomicCtor() +{ + { + AtomicType atomic; + UserType expected{0, 0}; + + UserType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == expected); + } + + { + AtomicType atomic{ {5, 8} }; + UserType expected{5, 8}; + + UserType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == expected); + } +} + +template +void AtomicUserTypeBasicTest::TestAssignmentOperators() +{ + { + AtomicType atomic; + UserType expected{5, 6}; + + atomic = {5, 6}; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + AtomicType atomic; + UserType expected{0, 0}; + + atomic = {0, 0}; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } +} + +template +void AtomicUserTypeBasicTest::TestIsLockFree() +{ + { + AtomicType atomic; + + VERIFY(atomic.is_lock_free() == true); + + VERIFY(AtomicType::is_always_lock_free == true); + } +} + +template +void AtomicUserTypeBasicTest::TestStore() +{ + { + AtomicType atomic; + UserType expected{5, 6}; + + atomic.store(expected, eastl::memory_order_relaxed); + + UserType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == expected); + } + + { + AtomicType atomic; + UserType expected{5, 6}; + + atomic.store({5, 6}, eastl::memory_order_relaxed); + + UserType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == expected); + } +} + +template +void 
AtomicUserTypeBasicTest::TestLoad() +{ + { + AtomicType atomic; + UserType expected{0, 0}; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + + VERIFY(atomic == expected); + } + + { + AtomicType atomic{ {5, 6} }; + UserType expected{5, 6}; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + + VERIFY(atomic == expected); + } +} + +template +void AtomicUserTypeBasicTest::TestExchange() +{ + { + AtomicType atomic; + UserType expected{0, 0}; + + UserType ret = atomic.exchange({0, 0}, eastl::memory_order_relaxed); + + VERIFY(ret == expected); + } + + { + AtomicType atomic; + UserType expected{0, 0}; + UserType expected2{0, 1}; + + UserType ret = atomic.exchange({0, 1}, eastl::memory_order_relaxed); + + VERIFY(ret == expected); + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected2); + } +} + +template +void AtomicUserTypeBasicTest::TestCompareExchangeWeak() +{ + { + AtomicType atomic; + + UserType observed{0, 0}; + bool ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed); + + UserType expected{0, 0}; + if (ret) + { + VERIFY(ret == true); + VERIFY(observed == expected); + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + } + + { + AtomicType atomic; + + UserType observed{0, 0}; + bool ret = atomic.compare_exchange_weak(observed, {0, 1}, eastl::memory_order_relaxed); + + UserType expected{0, 1}; + UserType expected2{0, 0}; + if (ret) + { + VERIFY(ret == true); + VERIFY(observed == expected2); + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + } + + { + AtomicType atomic; + + UserType observed{0, 1}; + bool ret = atomic.compare_exchange_weak(observed, {0, 1}, eastl::memory_order_relaxed); + + UserType expected{0, 0}; + + VERIFY(ret == false); + VERIFY(observed == expected); + } +} + +template +void AtomicUserTypeBasicTest::TestCompareExchangeStrong() +{ + { + AtomicType atomic; + + UserType observed{0, 0}; + bool ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed); + + UserType expected{0, 0}; + + VERIFY(ret == true); + VERIFY(observed == expected); + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + AtomicType atomic; + + UserType observed{0, 0}; + bool ret = atomic.compare_exchange_strong(observed, {0, 1}, eastl::memory_order_relaxed); + + UserType expected{0, 1}; + UserType expected2{0, 0}; + + VERIFY(ret == true); + VERIFY(observed == expected2); + VERIFY(atomic.load(eastl::memory_order_relaxed) == expected); + } + + { + AtomicType atomic; + + UserType observed{0, 1}; + bool ret = atomic.compare_exchange_strong(observed, {0, 1}, eastl::memory_order_relaxed); + + UserType expected{0, 0}; + + VERIFY(ret == false); + VERIFY(observed == expected); + } +} + +template +void AtomicUserTypeBasicTest::TestAllMemoryOrders() +{ + { + AtomicType atomic; + UserType val{0, 1}; + + atomic.store(val); + + atomic.store(val, eastl::memory_order_relaxed); + + atomic.store(val, eastl::memory_order_release); + + atomic.store(val, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + UserType ret = atomic.load(); + + ret = atomic.load(eastl::memory_order_relaxed); + + ret = atomic.load(eastl::memory_order_acquire); + + ret = atomic.load(eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + UserType ret = atomic.exchange({0, 1}); + + ret = atomic.exchange({0, 0}, eastl::memory_order_relaxed); + + ret = atomic.exchange({0, 0}, eastl::memory_order_acquire); + + ret = atomic.exchange({0, 0}, eastl::memory_order_release); + + ret = 
atomic.exchange({0, 0}, eastl::memory_order_acq_rel); + + ret = atomic.exchange({0, 0}, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + UserType observed{0, 0}; + + bool ret = atomic.compare_exchange_weak(observed, {0, 0}); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_release); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + UserType observed{0, 0}; + + bool ret = atomic.compare_exchange_strong(observed, {0, 0}); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_release); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + UserType observed{0, 0}; + bool ret; + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_release, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + UserType observed{0, 0}; + bool ret; + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acquire, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_release, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_acq_rel, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, {0, 0}, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); + } +} + + +class AtomicBoolBasicTest +{ +public: + + 
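+	// exercises eastl::atomic<bool>: construction, assignment, lock-freedom,
+	// store/load, exchange, both compare_exchange flavours, and all memory orderings.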
using AtomicType = eastl::atomic; + using BoolType = bool; + + int RunTest() + { + TestAtomicCtor(); + + TestAssignmentOperators(); + + TestIsLockFree(); + + TestStore(); + + TestLoad(); + + TestExchange(); + + TestCompareExchangeWeak(); + + TestCompareExchangeStrong(); + + TestAllMemoryOrders(); + + return nErrorCount; + } + +private: + + void TestAtomicCtor(); + + void TestAssignmentOperators(); + + void TestIsLockFree(); + + void TestStore(); + + void TestLoad(); + + void TestExchange(); + + void TestCompareExchangeWeak(); + + void TestCompareExchangeStrong(); + + void TestAllMemoryOrders(); + +private: + + int nErrorCount = 0; +}; + +void AtomicBoolBasicTest::TestAtomicCtor() +{ + { + AtomicType atomic{ false }; + + BoolType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == false); + } + + { + AtomicType atomic{ true }; + + BoolType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == true); + } + + { + AtomicType atomic; + + BoolType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == false); + } + + { + AtomicType atomic{}; + + BoolType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == false); + } +} + +void AtomicBoolBasicTest::TestAssignmentOperators() +{ + { + AtomicType atomic; + + BoolType ret = atomic = true; + + VERIFY(ret == true); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == true); + } +} + +void AtomicBoolBasicTest::TestIsLockFree() +{ + { + AtomicType atomic; + + bool ret = atomic.is_lock_free(); + + VERIFY(ret == true); + + VERIFY(AtomicType::is_always_lock_free == true); + } +} + +void AtomicBoolBasicTest::TestStore() +{ + { + AtomicType atomic; + + atomic.store(true, eastl::memory_order_relaxed); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == true); + } +} + +void AtomicBoolBasicTest::TestLoad() +{ + { + AtomicType atomic; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == false); + + VERIFY(atomic == false); + } + + { + AtomicType atomic{ true }; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == true); + + VERIFY(atomic == true); + } +} + +void AtomicBoolBasicTest::TestExchange() +{ + { + AtomicType atomic; + + BoolType ret = atomic.exchange(false, eastl::memory_order_relaxed); + + VERIFY(ret == false); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == false); + } + + { + AtomicType atomic; + + BoolType ret = atomic.exchange(true, eastl::memory_order_relaxed); + + VERIFY(ret == false); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == true); + } +} + +void AtomicBoolBasicTest::TestCompareExchangeWeak() +{ + { + AtomicType atomic{ false }; + + BoolType observed = false; + bool ret = atomic.compare_exchange_weak(observed, false, eastl::memory_order_relaxed); + + if (ret) + { + VERIFY(ret == true); + VERIFY(observed == false); + VERIFY(atomic.load(eastl::memory_order_relaxed) == false); + } + } + + { + AtomicType atomic{ false }; + + BoolType observed = false; + bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); + + if (ret) + { + VERIFY(ret == true); + VERIFY(observed == false); + VERIFY(atomic.load(eastl::memory_order_relaxed) == true); + } + } + + { + AtomicType atomic{ false }; + + BoolType observed = true; + bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); + + VERIFY(ret == false); + VERIFY(observed == false); + } +} + +void AtomicBoolBasicTest::TestCompareExchangeStrong() +{ + { + AtomicType atomic{ false }; + + BoolType observed = false; + bool ret = atomic.compare_exchange_weak(observed, false, 
eastl::memory_order_relaxed); + + VERIFY(ret == true); + VERIFY(observed == false); + VERIFY(atomic.load(eastl::memory_order_relaxed) == false); + } + + { + AtomicType atomic{ false }; + + BoolType observed = false; + bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); + + VERIFY(ret == true); + VERIFY(observed == false); + VERIFY(atomic.load(eastl::memory_order_relaxed) == true); + } + + { + AtomicType atomic{ false }; + + BoolType observed = true; + bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); + + VERIFY(ret == false); + VERIFY(observed == false); + } +} + +void AtomicBoolBasicTest::TestAllMemoryOrders() +{ + { + AtomicType atomic; + + atomic.store(true); + + atomic.store(true, eastl::memory_order_relaxed); + + atomic.store(true, eastl::memory_order_release); + + atomic.store(true, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + BoolType ret = atomic.load(); + + ret = atomic.load(eastl::memory_order_relaxed); + + ret = atomic.load(eastl::memory_order_acquire); + + ret = atomic.load(eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + BoolType ret = atomic.exchange(true); + + ret = atomic.exchange(true, eastl::memory_order_relaxed); + + ret = atomic.exchange(true, eastl::memory_order_acquire); + + ret = atomic.exchange(true, eastl::memory_order_release); + + ret = atomic.exchange(true, eastl::memory_order_acq_rel); + + ret = atomic.exchange(true, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + BoolType observed = false; + bool ret = atomic.compare_exchange_weak(observed, true); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_release); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + BoolType observed = false; + bool ret = atomic.compare_exchange_strong(observed, true); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_release); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + BoolType observed = false; + bool ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acquire, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_release, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, 
eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic; + + BoolType observed = false; + bool ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acquire, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_release, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_acq_rel, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, true, eastl::memory_order_seq_cst, eastl::memory_order_seq_cst); + } +} + + +template +class AtomicIntegralBasicTest +{ +public: + + using AtomicType = eastl::atomic; + using IntegralType = T; + + int RunTest() + { + TestAtomicCtor(); + + TestAtomicFetchAdd(); + TestAtomicAddFetch(); + + TestAtomicFetchSub(); + TestAtomicSubFetch(); + + TestAtomicFetchAnd(); + TestAtomicAndFetch(); + + TestAtomicFetchOr(); + TestAtomicOrFetch(); + + TestAtomicFetchXor(); + TestAtomicXorFetch(); + + TestAssignmentOperators(); + + TestIsLockFree(); + + TestStore(); + + TestLoad(); + + TestExchange(); + + TestCompareExchangeWeak(); + + TestCompareExchangeStrong(); + + TestAllMemoryOrders(); + + TestAtomicStandalone(); + + return nErrorCount; + } + +private: + + void TestAtomicCtor(); + + void TestAtomicFetchAdd(); + void TestAtomicAddFetch(); + + void TestAtomicFetchSub(); + void TestAtomicSubFetch(); + + void TestAtomicFetchAnd(); + void TestAtomicAndFetch(); + + void TestAtomicFetchOr(); + void TestAtomicOrFetch(); + + void TestAtomicFetchXor(); + void TestAtomicXorFetch(); + + void TestAssignmentOperators(); + + void TestIsLockFree(); + + void TestStore(); + + void TestLoad(); + + void TestExchange(); + + void TestCompareExchangeWeak(); + + void TestCompareExchangeStrong(); + + void TestAllMemoryOrders(); + + void TestAtomicStandalone(); + +private: + + int nErrorCount = 0; +}; + +template +void AtomicIntegralBasicTest::TestAtomicCtor() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 1); + } + + { + AtomicType atomic{ 20 }; + + IntegralType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 20); + } + + { + AtomicType atomic; + + IntegralType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicFetchAdd() +{ + { + AtomicType atomic; + + IntegralType ret = atomic.fetch_add(1, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 1); + } + + { + AtomicType atomic; + + IntegralType 
ret = atomic.fetch_add(0, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 5 }; + + IntegralType ret = atomic.fetch_add(0, eastl::memory_order_relaxed); + + VERIFY(ret == 5); + + ret = atomic.fetch_add(4, eastl::memory_order_relaxed); + + VERIFY(ret == 5); + + ret = atomic.fetch_add(1, eastl::memory_order_relaxed); + + VERIFY(ret == 9); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 10); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicAddFetch() +{ + { + AtomicType atomic; + + IntegralType ret = atomic.add_fetch(1, eastl::memory_order_relaxed); + + VERIFY(ret == 1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic.add_fetch(0, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 5 }; + + IntegralType ret = atomic.add_fetch(0, eastl::memory_order_relaxed); + + VERIFY(ret == 5); + + ret = atomic.add_fetch(4, eastl::memory_order_relaxed); + + VERIFY(ret == 9); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 9); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicFetchSub() +{ + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic.fetch_sub(1, eastl::memory_order_relaxed); + + VERIFY(ret == 1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic.fetch_sub(0, eastl::memory_order_relaxed); + + VERIFY(ret == 1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 1); + } + + { + AtomicType atomic{ 5 }; + + IntegralType ret = atomic.fetch_sub(2, eastl::memory_order_relaxed); + + VERIFY(ret == 5); + + ret = atomic.fetch_sub(1, eastl::memory_order_relaxed); + + VERIFY(ret == 3); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 2); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicSubFetch() +{ + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic.sub_fetch(1, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic.sub_fetch(0, eastl::memory_order_relaxed); + + VERIFY(ret == 1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 1); + } + + { + AtomicType atomic{ 5 }; + + IntegralType ret = atomic.sub_fetch(2, eastl::memory_order_relaxed); + + VERIFY(ret == 3); + + ret = atomic.sub_fetch(1, eastl::memory_order_relaxed); + + VERIFY(ret == 2); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 2); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicFetchAnd() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.fetch_and(0x0, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.fetch_and(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 0xF }; + + IntegralType ret = atomic.fetch_and(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0xF); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0X1); + } + + { + AtomicType atomic{ 0xF }; + + 
IntegralType ret = atomic.fetch_and(0xF0, eastl::memory_order_relaxed); + + VERIFY(ret == 0xF); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicAndFetch() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.and_fetch(0x0, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.and_fetch(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 0xF }; + + IntegralType ret = atomic.and_fetch(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } + + { + AtomicType atomic{ 0xF }; + + IntegralType ret = atomic.and_fetch(0xF0, eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicFetchOr() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.fetch_or(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic.fetch_or(0x0, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic.fetch_or(0x2, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x3); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicOrFetch() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.or_fetch(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic.or_fetch(0x0, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic.or_fetch(0x2, eastl::memory_order_relaxed); + + VERIFY(ret == 0x3); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x3); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicFetchXor() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.fetch_xor(0x0, eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic.fetch_xor(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + } + + { + AtomicType atomic{ 0x0 }; + + IntegralType ret = atomic.fetch_xor(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } +} + +template +void AtomicIntegralBasicTest::TestAtomicXorFetch() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.xor_fetch(0x0, eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic.xor_fetch(0x1, 
eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + } + + { + AtomicType atomic{ 0x0 }; + + IntegralType ret = atomic.xor_fetch(0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } +} + +template +void AtomicIntegralBasicTest::TestAssignmentOperators() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = (atomic = 5); + + VERIFY(ret == 5); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 5); + } + + { + AtomicType atomic{ 0 }; + + IntegralType ret = ++atomic; + + VERIFY(ret == 1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 1); + } + + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic++; + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 1); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = --atomic; + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic--; + + VERIFY(ret == 1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic += 5; + + VERIFY(ret == 5); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 5); + } + + { + AtomicType atomic{ 5 }; + + IntegralType ret = atomic -= 3; + + VERIFY(ret == 2); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 2); + } + + { + AtomicType atomic{ 0x0 }; + + IntegralType ret = atomic |= 0x1; + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic &= 0x1; + + VERIFY(ret == 0x1); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic ^= 0x1; + + VERIFY(ret == 0x0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + } +} + +template +void AtomicIntegralBasicTest::TestIsLockFree() +{ + { + const AtomicType atomic{ 5 }; + + VERIFY(atomic.is_lock_free() == true); + + VERIFY(AtomicType::is_always_lock_free == true); + } +} + +template +void AtomicIntegralBasicTest::TestStore() +{ + { + AtomicType atomic{ 0 }; + + atomic.store(0, eastl::memory_order_relaxed); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); + } + + { + AtomicType atomic{ 0 }; + + atomic.store(1, eastl::memory_order_relaxed); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } +} + +template +void AtomicIntegralBasicTest::TestLoad() +{ + { + AtomicType atomic{ 0 }; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); + + bool ret = atomic == 0; + VERIFY(ret == true); + + VERIFY(atomic == 0); + } + + { + AtomicType atomic{ 5 }; + + VERIFY(atomic.load(eastl::memory_order_relaxed) == 5); + + bool ret = atomic == 5; + VERIFY(ret == true); + + VERIFY(atomic == 5); + } +} + +template +void AtomicIntegralBasicTest::TestExchange() +{ + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.exchange(0, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic{ 0 }; + + IntegralType ret = atomic.exchange(1, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + + ret = atomic.load(eastl::memory_order_relaxed); + + VERIFY(ret == 1); + } +} + +template +void 
AtomicIntegralBasicTest::TestCompareExchangeWeak() +{ + { + AtomicType atomic{ 0 }; + + IntegralType observed = 0; + bool ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed); + + if (ret == true) + { + VERIFY(ret == true); + VERIFY(observed == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + } + + { + AtomicType atomic{ 0 }; + + IntegralType observed = 1; + bool ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed); + + VERIFY(ret == false); + VERIFY(observed == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); + } +} + +template +void AtomicIntegralBasicTest::TestCompareExchangeStrong() +{ + { + AtomicType atomic{ 0 }; + + IntegralType observed = 0; + bool ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_relaxed); + + VERIFY(ret == true); + VERIFY(observed == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic{ 0 }; + + IntegralType observed = 1; + bool ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_relaxed); + + VERIFY(ret == false); + VERIFY(observed == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); + } +} + +template +void AtomicIntegralBasicTest::TestAllMemoryOrders() +{ + { + AtomicType atomic{}; + + atomic.store(1); + + atomic.store(1, eastl::memory_order_relaxed); + + atomic.store(1, eastl::memory_order_release); + + atomic.store(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.load(); + + ret = atomic.load(eastl::memory_order_relaxed); + + ret = atomic.load(eastl::memory_order_acquire); + + ret = atomic.load(eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.exchange(1); + + ret = atomic.exchange(1, eastl::memory_order_relaxed); + + ret = atomic.exchange(1, eastl::memory_order_acquire); + + ret = atomic.exchange(1, eastl::memory_order_release); + + ret = atomic.exchange(1, eastl::memory_order_acq_rel); + + ret = atomic.exchange(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.fetch_add(1); + + ret = atomic.fetch_add(1, eastl::memory_order_relaxed); + + ret = atomic.fetch_add(1, eastl::memory_order_acquire); + + ret = atomic.fetch_add(1, eastl::memory_order_release); + + ret = atomic.fetch_add(1, eastl::memory_order_acq_rel); + + ret = atomic.fetch_add(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.add_fetch(1); + + ret = atomic.add_fetch(1, eastl::memory_order_relaxed); + + ret = atomic.add_fetch(1, eastl::memory_order_acquire); + + ret = atomic.add_fetch(1, eastl::memory_order_release); + + ret = atomic.add_fetch(1, eastl::memory_order_acq_rel); + + ret = atomic.add_fetch(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.fetch_sub(1); + + ret = atomic.fetch_sub(1, eastl::memory_order_relaxed); + + ret = atomic.fetch_sub(1, eastl::memory_order_acquire); + + ret = atomic.fetch_sub(1, eastl::memory_order_release); + + ret = atomic.fetch_sub(1, eastl::memory_order_acq_rel); + + ret = atomic.fetch_sub(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.sub_fetch(1); + + ret = atomic.sub_fetch(1, eastl::memory_order_relaxed); + + ret = atomic.sub_fetch(1, eastl::memory_order_acquire); + + ret = atomic.sub_fetch(1, eastl::memory_order_release); + + ret = atomic.sub_fetch(1, eastl::memory_order_acq_rel); + + ret = atomic.sub_fetch(1, 
eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.fetch_and(1); + + ret = atomic.fetch_and(1, eastl::memory_order_relaxed); + + ret = atomic.fetch_and(1, eastl::memory_order_acquire); + + ret = atomic.fetch_and(1, eastl::memory_order_release); + + ret = atomic.fetch_and(1, eastl::memory_order_acq_rel); + + ret = atomic.fetch_and(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.and_fetch(1); + + ret = atomic.and_fetch(1, eastl::memory_order_relaxed); + + ret = atomic.and_fetch(1, eastl::memory_order_acquire); + + ret = atomic.and_fetch(1, eastl::memory_order_release); + + ret = atomic.and_fetch(1, eastl::memory_order_acq_rel); + + ret = atomic.and_fetch(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.fetch_or(1); + + ret = atomic.fetch_or(1, eastl::memory_order_relaxed); + + ret = atomic.fetch_or(1, eastl::memory_order_acquire); + + ret = atomic.fetch_or(1, eastl::memory_order_release); + + ret = atomic.fetch_or(1, eastl::memory_order_acq_rel); + + ret = atomic.fetch_or(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.or_fetch(1); + + ret = atomic.or_fetch(1, eastl::memory_order_relaxed); + + ret = atomic.or_fetch(1, eastl::memory_order_acquire); + + ret = atomic.or_fetch(1, eastl::memory_order_release); + + ret = atomic.or_fetch(1, eastl::memory_order_acq_rel); + + ret = atomic.or_fetch(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.fetch_xor(1); + + ret = atomic.fetch_xor(1, eastl::memory_order_relaxed); + + ret = atomic.fetch_xor(1, eastl::memory_order_acquire); + + ret = atomic.fetch_xor(1, eastl::memory_order_release); + + ret = atomic.fetch_xor(1, eastl::memory_order_acq_rel); + + ret = atomic.fetch_xor(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType ret = atomic.xor_fetch(1); + + ret = atomic.xor_fetch(1, eastl::memory_order_relaxed); + + ret = atomic.xor_fetch(1, eastl::memory_order_acquire); + + ret = atomic.xor_fetch(1, eastl::memory_order_release); + + ret = atomic.xor_fetch(1, eastl::memory_order_acq_rel); + + ret = atomic.xor_fetch(1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType observed = 0; + bool ret; + + ret = atomic.compare_exchange_weak(observed, 1); + + ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_release); + + ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_weak(observed, 1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType observed = 0; + bool ret; + + ret = atomic.compare_exchange_strong(observed, 1); + + ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_release); + + ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_acq_rel); + + ret = atomic.compare_exchange_strong(observed, 1, eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType observed = 0; + bool ret; + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_relaxed, + 
eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_acquire, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_acquire, + eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_release, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_acq_rel, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_acq_rel, + eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_seq_cst, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_seq_cst, + eastl::memory_order_acquire); + + ret = atomic.compare_exchange_weak(observed, 1, + eastl::memory_order_seq_cst, + eastl::memory_order_seq_cst); + } + + { + AtomicType atomic{}; + + IntegralType observed = 0; + bool ret; + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_relaxed, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_acquire, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_acquire, + eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_release, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_acq_rel, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_acq_rel, + eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_seq_cst, + eastl::memory_order_relaxed); + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_seq_cst, + eastl::memory_order_acquire); + + ret = atomic.compare_exchange_strong(observed, 1, + eastl::memory_order_seq_cst, + eastl::memory_order_seq_cst); + } + +} + +template +void AtomicIntegralBasicTest::TestAtomicStandalone() +{ + { + AtomicType atomic; + + IntegralType expected = 0; + bool ret = atomic_compare_exchange_weak(&atomic, &expected, 1); + + if (ret) + { + VERIFY(ret == true); + + VERIFY(expected == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + } + + { + AtomicType atomic; + + IntegralType expected = 0; + bool ret = atomic_compare_exchange_weak_explicit(&atomic, &expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + if (ret) + { + VERIFY(ret == true); + + VERIFY(expected == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + } + + { + AtomicType atomic; + + IntegralType expected = 0; + bool ret = atomic_compare_exchange_strong(&atomic, &expected, 1); + + VERIFY(ret == true); + + VERIFY(expected == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + IntegralType expected = 0; + bool ret = atomic_compare_exchange_strong_explicit(&atomic, &expected, 1, eastl::memory_order_relaxed, eastl::memory_order_relaxed); + + VERIFY(ret == true); + + VERIFY(expected == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_fetch_xor(&atomic, 0x1); + + VERIFY(ret == 0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_fetch_xor_explicit(&atomic, 0x1, 
eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_xor_fetch(&atomic, 0x1); + + VERIFY(ret == 0x1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_xor_fetch_explicit(&atomic, 0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_fetch_or(&atomic, 0x1); + + VERIFY(ret == 0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_fetch_or_explicit(&atomic, 0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_or_fetch(&atomic, 0x1); + + VERIFY(ret == 0x1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_or_fetch_explicit(&atomic, 0x1, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x1); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic_fetch_and(&atomic, 0x0); + + VERIFY(ret == 0x1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic_fetch_and_explicit(&atomic, 0x0, eastl::memory_order_relaxed); + + VERIFY(ret == 0x1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic_and_fetch(&atomic, 0x0); + + VERIFY(ret == 0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0); + } + + { + AtomicType atomic{ 0x1 }; + + IntegralType ret = atomic_and_fetch_explicit(&atomic, 0x0, eastl::memory_order_relaxed); + + VERIFY(ret == 0x0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0x0); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic_fetch_sub(&atomic, 1); + + VERIFY(ret == 1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic_fetch_sub_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == 1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic_sub_fetch(&atomic, 1); + + VERIFY(ret == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); + } + + { + AtomicType atomic{ 1 }; + + IntegralType ret = atomic_sub_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 0); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_fetch_add(&atomic, 1); + + VERIFY(ret == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_fetch_add_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_add_fetch(&atomic, 1); + + VERIFY(ret == 1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_add_fetch_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == 1); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + IntegralType ret = 
atomic_exchange(&atomic, 1); + + VERIFY(ret == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_exchange_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_load(&atomic); + + VERIFY(ret == 0); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_load_explicit(&atomic, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_load_cond(&atomic, [](IntegralType val) { return true; }); + + VERIFY(ret == 0); + } + + { + AtomicType atomic; + + IntegralType ret = atomic_load_cond_explicit(&atomic, [](IntegralType val) { return true; }, eastl::memory_order_relaxed); + + VERIFY(ret == 0); + } + + { + AtomicType atomic; + + atomic_store(&atomic, 1); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + atomic_store_explicit(&atomic, 1, eastl::memory_order_relaxed); + + VERIFY(atomic.load(eastl::memory_order_relaxed) == 1); + } + + { + AtomicType atomic; + + VERIFY(atomic_is_lock_free(&atomic) == true); + } +} + +struct AtomicNonDefaultConstructible +{ + AtomicNonDefaultConstructible(uint8_t a) + : a(a) + { + } + + friend bool operator==(const AtomicNonDefaultConstructible& a, const AtomicNonDefaultConstructible& b) + { + return a.a == b.a; + } + + uint8_t a; +}; + +#if defined(EASTL_ATOMIC_HAS_8BIT) + +int TestAtomicNonDefaultConstructible() +{ + int nErrorCount = 0; + + { + eastl::atomic atomic{AtomicNonDefaultConstructible{(uint8_t)3}}; + + VERIFY(atomic.load() == AtomicNonDefaultConstructible{(uint8_t)3}); + } + + { + eastl::atomic atomic{AtomicNonDefaultConstructible{(uint8_t)3}}; + + atomic.store(AtomicNonDefaultConstructible{(uint8_t)4}); + + VERIFY(atomic.load() == AtomicNonDefaultConstructible{(uint8_t)4}); + } + + return nErrorCount; +} + +#endif + +struct Atomic128LoadType +{ + friend bool operator==(const Atomic128LoadType& a, const Atomic128LoadType& b) + { + return a.a == b.a && a.b == b.b && a.c == b.c && a.d == b.d; + } + + uint32_t a, b, c, d; +}; + +#if defined(EASTL_ATOMIC_HAS_128BIT) + +int TestAtomic128Loads() +{ + int nErrorCount = 0; + + { + eastl::atomic atomic{Atomic128LoadType{1, 1, 0, 0}}; + + VERIFY((atomic.load() == Atomic128LoadType{1, 1, 0, 0})); + } + + { + eastl::atomic atomic{Atomic128LoadType{0, 0, 1, 1}}; + + VERIFY((atomic.load() == Atomic128LoadType{0, 0, 1, 1})); + } + + { + eastl::atomic atomic{Atomic128LoadType{0, 1, 0, 1}}; + + VERIFY((atomic.load() == Atomic128LoadType{0, 1, 0, 1})); + } + + { + eastl::atomic atomic{Atomic128LoadType{1, 0, 1, 0}}; + + VERIFY((atomic.load() == Atomic128LoadType{1, 0, 1, 0})); + } + + return nErrorCount; +} + +#endif + +int TestAtomicBasic() +{ + int nErrorCount = 0; + + #if defined(EASTL_ATOMIC_HAS_8BIT) + { + AtomicIntegralBasicTest u8AtomicTest; + + nErrorCount += u8AtomicTest.RunTest(); + } + #endif + + #if defined(EASTL_ATOMIC_HAS_16BIT) + { + AtomicIntegralBasicTest u16AtomicTest; + + nErrorCount += u16AtomicTest.RunTest(); + } + #endif + + #if defined(EASTL_ATOMIC_HAS_32BIT) + { + AtomicIntegralBasicTest u32AtomicTest; + + nErrorCount += u32AtomicTest.RunTest(); + } + #endif + + #if defined(EASTL_ATOMIC_HAS_64BIT) + { + AtomicIntegralBasicTest u64AtomicTest; + + nErrorCount += u64AtomicTest.RunTest(); + } + #endif + + #if defined(EASTL_ATOMIC_HAS_128BIT) && (defined(EA_COMPILER_CLANG) || 
defined(EA_COMPILER_GNUC)) + { + AtomicIntegralBasicTest<__uint128_t> u128AtomicTest; + + nErrorCount += u128AtomicTest.RunTest(); + } + + { + AtomicIntegralBasicTest u128AtomicTest; + + nErrorCount += u128AtomicTest.RunTest(); + } + #endif + + { + AtomicBoolBasicTest boolAtomicTest; + + nErrorCount += boolAtomicTest.RunTest(); + } + + #if defined(EASTL_ATOMIC_HAS_16BIT) + { + AtomicUserTypeBasicTest userTypeAtomicTest; + + nErrorCount += userTypeAtomicTest.RunTest(); + } + #endif + + #if defined(EASTL_ATOMIC_HAS_32BIT) + { + AtomicUserTypeBasicTest userTypeAtomicTest; + + nErrorCount += userTypeAtomicTest.RunTest(); + } + + { + AtomicUserTypeBasicTest userTypeAtomicTest; + + nErrorCount += userTypeAtomicTest.RunTest(); + } + #endif + + #if defined(EASTL_ATOMIC_HAS_128BIT) + { + AtomicUserTypeBasicTest userTypeAtomicTest; + + nErrorCount += userTypeAtomicTest.RunTest(); + } + #endif + + { + AtomicPointerBasicTest ptrAtomicTest; + + nErrorCount += ptrAtomicTest.RunTest(); + } + + { + AtomicVoidPointerBasicTest voidPtrAtomicTest; + + nErrorCount += voidPtrAtomicTest.RunTest(); + } + + { + AtomicFlagBasicTest atomicFlagBasicTest; + + nErrorCount += atomicFlagBasicTest.RunTest(); + } + + { + AtomicStandaloneBasicTest atomicStandaloneBasicTest; + + nErrorCount += atomicStandaloneBasicTest.RunTest(); + } + +#if defined(EASTL_ATOMIC_HAS_128BIT) + + nErrorCount += TestAtomic128Loads(); + +#endif + +#if defined(EASTL_ATOMIC_HAS_8BIT) + + nErrorCount += TestAtomicNonDefaultConstructible(); + +#endif + + return nErrorCount; +} diff --git a/test/source/TestSet.cpp b/test/source/TestSet.cpp index 14159056..1adc12f1 100644 --- a/test/source/TestSet.cpp +++ b/test/source/TestSet.cpp @@ -11,6 +11,7 @@ #include #include + EA_DISABLE_ALL_VC_WARNINGS() #include @@ -50,6 +51,49 @@ typedef eastl::multiset VMS4; /////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// +// xvalue_test +// +// Test utility type that sets the class data to known value when its data has +// has been moved out. This enables us to write tests that verify that the +// destruction action taken on container elements occured during move operations. +// +struct xvalue_test +{ + static const int MOVED_FROM = -1; + + int data = 42; + + xvalue_test(int in) : data(in) {} + ~xvalue_test() = default; + + xvalue_test(const xvalue_test& other) + : data(other.data) {} + + xvalue_test& operator=(const xvalue_test& other) + { + data = other.data; + return *this; + } + + xvalue_test(xvalue_test&& other) + { + data = other.data; + other.data = MOVED_FROM; + } + + xvalue_test& operator=(xvalue_test&& other) + { + data = other.data; + other.data = MOVED_FROM; + return *this; + } + + friend bool operator<(const xvalue_test& rhs, const xvalue_test& lhs) + { return rhs.data < lhs.data; } +}; + + int TestSet() { @@ -128,6 +172,30 @@ int TestSet() VERIFY((s == multiset{1, 1, 1, 3, 3, 3})); } + { + // user reported regression: ensure container elements are NOT + // moved from during the eastl::set construction process. + eastl::vector m1 = {{0}, {1}, {2}, {3}, {4}, {5}}; + eastl::set m2{m1.begin(), m1.end()}; + + bool result = eastl::all_of(m1.begin(), m1.end(), + [&](auto& e) { return e.data != xvalue_test::MOVED_FROM; }); + + VERIFY(result); + } + + { + // user reported regression: ensure container elements are moved from during the + // eastl::set construction process when using an eastl::move_iterator. 
+ eastl::vector m1 = {{0}, {1}, {2}, {3}, {4}, {5}}; + eastl::set m2{eastl::make_move_iterator(m1.begin()), eastl::make_move_iterator(m1.end())}; + + bool result = eastl::all_of(m1.begin(), m1.end(), + [&](auto& e) { return e.data == xvalue_test::MOVED_FROM; }); + + VERIFY(result); + } + return nErrorCount; }
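
Closing note on the two TestSet regressions above: what they pin down is that a range constructor must let the iterator's reference type decide between copy and move. The sketch below is illustrative only and is not the EASTL red-black tree code; xvalue_probe, insert_range_moving, insert_range_forwarding and insert_range_sketch are hypothetical names introduced here.

// Illustrative sketch, not part of the patch.
#include <EASTL/vector.h>
#include <EASTL/iterator.h>
#include <EASTL/utility.h>

struct xvalue_probe
{
	static const int MOVED_FROM = -1;
	int data;

	xvalue_probe(int in) : data(in) {}
	xvalue_probe(const xvalue_probe& other) : data(other.data) {}
	xvalue_probe(xvalue_probe&& other) : data(other.data) { other.data = MOVED_FROM; }
};

// Buggy pattern guarded against by the first regression test: moving
// unconditionally steals from the source even for plain lvalue iterators.
template <typename Container, typename InputIterator>
void insert_range_moving(Container& c, InputIterator first, InputIterator last)
{
	for (; first != last; ++first)
		c.push_back(eastl::move(*first));
}

// Fixed pattern: forward the dereferenced iterator as-is. Plain iterators yield
// lvalue references (copy construction); eastl::move_iterator yields rvalue
// references (move construction), which is what the second test expects.
template <typename Container, typename InputIterator>
void insert_range_forwarding(Container& c, InputIterator first, InputIterator last)
{
	for (; first != last; ++first)
		c.push_back(*first);
}

void insert_range_sketch()
{
	eastl::vector<xvalue_probe> src = {{0}, {1}, {2}};
	eastl::vector<xvalue_probe> dst;

	// Copies: src elements keep their values.
	insert_range_forwarding(dst, src.begin(), src.end());

	// Moves: dereferencing a move_iterator yields xvalue_probe&&, so each
	// src element is left with data == MOVED_FROM afterwards.
	insert_range_forwarding(dst, eastl::make_move_iterator(src.begin()),
	                             eastl::make_move_iterator(src.end()));
}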
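
One more closing note, on the compare-exchange coverage in TestAtomicBasic.cpp above: compare_exchange_weak is permitted to fail spuriously, which is why the weak test cases wrap their VERIFYs in an if (ret) check while the strong test cases assert success unconditionally. A minimal sketch of the usual retry-loop usage follows; increment_if_even is a hypothetical helper and not part of the patch.

#include <EASTL/atomic.h>
#include <stdint.h>

// Atomically add 1 while the current value is even. compare_exchange_weak is
// driven by a loop because it may fail spuriously; on failure 'observed' is
// reloaded with the atomic's current value.
bool increment_if_even(eastl::atomic<uint32_t>& counter)
{
	uint32_t observed = counter.load(eastl::memory_order_relaxed);

	while ((observed & 1u) == 0u)
	{
		if (counter.compare_exchange_weak(observed, observed + 1u,
		                                  eastl::memory_order_acq_rel,
		                                  eastl::memory_order_relaxed))
		{
			return true; // installed observed + 1
		}
		// Spurious failure or lost race; 'observed' now holds the latest value.
	}

	return false; // value is odd, nothing to do
}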