#pragma once

enum memory_order {
	MO_RELAXED = __ATOMIC_RELAXED,
	MO_CONSUME = __ATOMIC_CONSUME,
	MO_ACQUIRE = __ATOMIC_ACQUIRE,
	MO_RELEASE = __ATOMIC_RELEASE,
	MO_ACQ_REL = __ATOMIC_ACQ_REL,
	MO_SEQ_CST = __ATOMIC_SEQ_CST,
};

/*
 * Prevents the compiler from reordering memory accesses across the
 * barrier; it has no effect on reordering done by the CPU.
 */
#define compiler_barrier() __atomic_signal_fence(MO_ACQ_REL)

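/*
 * Illustrative sketch (not part of this header): a compiler barrier is
 * sufficient when both parties run on the same thread of execution,
 * e.g. code synchronizing with its own signal handler. The names
 * `pending_value` and `value_ready` are hypothetical.
 *
 *	static int pending_value;
 *	static volatile sig_atomic_t value_ready;
 *
 *	void publish_to_handler(int v)
 *	{
 *		pending_value = v;
 *		compiler_barrier();	// handler must not observe value_ready
 *		value_ready = 1;	// before pending_value is written
 *	}
 */
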
/*
 * Makes preceding atomic loads behave as acquire loads: no later
 * loads/stores can be reordered before the last atomic load preceding
 * the fence.
 */
#define barrier_acquire() \
	({ compiler_barrier(); __atomic_thread_fence(MO_ACQUIRE); })

/*
 * Makes following atomic stores behave as release stores: no earlier
 * loads/stores can be reordered after the first atomic store following
 * the fence.
 */
#define barrier_release() \
	({ compiler_barrier(); __atomic_thread_fence(MO_RELEASE); })

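/*
 * Illustrative sketch (not part of this header): the classic fence-based
 * message-passing pattern, pairing barrier_release() in the producer
 * with barrier_acquire() in the consumer. `payload` and `ready` are
 * hypothetical.
 *
 *	static int payload;
 *	static int ready;
 *
 *	void producer(void)
 *	{
 *		payload = 42;
 *		barrier_release();	// orders the payload store before ready
 *		atomic_store_relaxed(&ready, 1);
 *	}
 *
 *	int consumer(void)
 *	{
 *		while (!atomic_load_relaxed(&ready))
 *			;
 *		barrier_acquire();	// orders the ready load before payload
 *		return payload;		// guaranteed to read 42
 *	}
 */
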
/*
 * A stronger combination of the barriers above: makes the surrounding
 * atomic operations sequentially consistent, which also forbids
 * store-load reordering across the fence.
 */
#define barrier_full() \
	({ compiler_barrier(); __atomic_thread_fence(MO_SEQ_CST); })

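/*
 * Illustrative sketch (not part of this header): store-load ordering,
 * which acquire/release cannot provide, as in Dekker-style mutual
 * exclusion. `flag0`/`flag1` are hypothetical; thread 1 is symmetric
 * with the flags swapped.
 *
 *	static int flag0, flag1;
 *
 *	void thread0(void)
 *	{
 *		atomic_store_relaxed(&flag0, 1);
 *		barrier_full();	// seq_cst fence: the load below cannot
 *				// be reordered before the store above
 *		if (!atomic_load_relaxed(&flag1)) {
 *			// critical section: at most one thread gets here
 *		}
 *	}
 */
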
#define atomic_load_explicit(ptr, mo) __atomic_load_n(ptr, mo)
#define atomic_load_relaxed(ptr) atomic_load_explicit(ptr, MO_RELAXED)
#define atomic_load_acquire(ptr) atomic_load_explicit(ptr, MO_ACQUIRE)
#define atomic_load_seq_cst(ptr) atomic_load_explicit(ptr, MO_SEQ_CST)

#define atomic_store_explicit(ptr, x, mo) __atomic_store_n(ptr, x, mo)
#define atomic_store_relaxed(ptr, x) atomic_store_explicit(ptr, x, MO_RELAXED)
#define atomic_store_release(ptr, x) atomic_store_explicit(ptr, x, MO_RELEASE)
#define atomic_store_seq_cst(ptr, x) atomic_store_explicit(ptr, x, MO_SEQ_CST)

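/*
 * Illustrative sketch (not part of this header): the same message
 * passing as the fence example above, expressed with the ordering on
 * the accesses themselves. `payload`, `ready` and `use()` are
 * hypothetical.
 *
 *	payload = 42;
 *	atomic_store_release(&ready, 1);	// producer
 *
 *	if (atomic_load_acquire(&ready))	// consumer
 *		use(payload);			// sees 42 if ready was seen as 1
 */
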
/*
 * Read-modify-write operations: the *_fetch macros return the new
 * value, atomic_xchg returns the previous one.
 */
#define atomic_add_fetch(ptr, x, mo) __atomic_add_fetch(ptr, x, mo)
#define atomic_sub_fetch(ptr, x, mo) __atomic_sub_fetch(ptr, x, mo)
#define atomic_and_fetch(ptr, x, mo) __atomic_and_fetch(ptr, x, mo)
#define atomic_or_fetch(ptr, x, mo) __atomic_or_fetch(ptr, x, mo)
#define atomic_xor_fetch(ptr, x, mo) __atomic_xor_fetch(ptr, x, mo)
#define atomic_xchg(ptr, x, mo) __atomic_exchange_n(ptr, x, mo)

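/*
 * Illustrative sketch (not part of this header): a reference count.
 * Taking a reference can be relaxed; the final release must order all
 * prior accesses to the object before the free (release) and make them
 * visible to the freeing thread (acquire), hence MO_ACQ_REL. The type
 * `struct obj` and the helpers around it are hypothetical.
 *
 *	void obj_get(struct obj *o)
 *	{
 *		atomic_add_fetch(&o->refs, 1, MO_RELAXED);
 *	}
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_sub_fetch(&o->refs, 1, MO_ACQ_REL) == 0)
 *			obj_free(o);
 *	}
 */
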
/*
 * Strong compare-and-swap: atomically stores desired to *ptr if *ptr
 * equals expected, returning true on success. Note that expected is
 * passed by name (the macro takes its address) and is updated with the
 * current value of *ptr on failure.
 */
#define atomic_cmpxchg_explicit(ptr, expected, desired, success_mo, fail_mo) \
	__atomic_compare_exchange_n(ptr, &(expected), desired, 0, success_mo, fail_mo)

#define atomic_cmpxchg_acq_rel(ptr, expected, desired) \
	atomic_cmpxchg_explicit(ptr, expected, desired, MO_ACQ_REL, MO_ACQUIRE)
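
/*
 * Illustrative sketch (not part of this header): a lock-free "store the
 * maximum" built on the CAS above. Because expected is refreshed on
 * failure, the loop needs no separate re-load. `atomic_store_max` is
 * hypothetical.
 *
 *	void atomic_store_max(int *p, int x)
 *	{
 *		int cur = atomic_load_relaxed(p);
 *		while (cur < x && !atomic_cmpxchg_acq_rel(p, cur, x))
 *			;	// cur now holds the freshly observed value
 *	}
 */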