// TODO: If C++11, use #include <atomic>.
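As a rough illustration of that TODO, a C++11 version built on std::atomic could look like the sketch below. This is an assumption about future direction, not part of this change; the `11`-suffixed names are hypothetical, and the sketch sidesteps the existing `volatile int *` signature, since retrofitting std::atomic onto a plain int would need std::atomic_ref (C++20) or non-standard reinterpretation.

#include <atomic>

// Hypothetical C++11 counterparts (illustrative names, not GDAL API).
int CPLAtomicAdd11(std::atomic<int> *ptr, int increment)
{
    // fetch_add() returns the value *before* the addition, hence "+ increment".
    return ptr->fetch_add(increment) + increment;
}

int CPLAtomicCompareAndExchange11(std::atomic<int> *ptr, int oldval, int newval)
{
    // True iff *ptr equaled oldval and was replaced by newval.
    return ptr->compare_exchange_strong(oldval, newval) ? 1 : 0;
}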

-
#if defined(_MSC_VER)

#include <windows.h>

-int CPLAtomicAdd(volatile int* ptr, int increment)
+int CPLAtomicAdd(volatile int *ptr, int increment)
{
-    return InterlockedExchangeAdd((volatile LONG*)(ptr),
-                                  (LONG)(increment)) + increment;
+    return InterlockedExchangeAdd((volatile LONG *)(ptr), (LONG)(increment)) +
+           increment;
}

-int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval)
+int CPLAtomicCompareAndExchange(volatile int *ptr, int oldval, int newval)
{
-    return (LONG)InterlockedCompareExchange((volatile LONG*)(ptr), (LONG)newval,
-                                            (LONG)oldval) == (LONG)oldval;
+    return (LONG)InterlockedCompareExchange((volatile LONG *)(ptr),
+                                            (LONG)newval,
+                                            (LONG)oldval) == (LONG)oldval;
}
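In this branch and the MinGW one below, InterlockedExchangeAdd() returns the value the target held *before* the addition, which is why `increment` is added back: CPLAtomicAdd() is specified to return the updated value. A small usage sketch (the counter name is illustrative):

volatile int nCounter = 0;

void Illustrate()
{
    int after_inc = CPLAtomicAdd(&nCounter, 1);   // after_inc == 1
    int after_dec = CPLAtomicAdd(&nCounter, -1);  // after_dec == 0
}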

#elif defined(__MINGW32__) && defined(__i386__)

#include <windows.h>

-int CPLAtomicAdd(volatile int* ptr, int increment)
+int CPLAtomicAdd(volatile int *ptr, int increment)
{
-    return InterlockedExchangeAdd((LONG*)(ptr), (LONG)(increment)) + increment;
+    return InterlockedExchangeAdd((LONG *)(ptr), (LONG)(increment)) + increment;
}

-int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval)
+int CPLAtomicCompareAndExchange(volatile int *ptr, int oldval, int newval)
{
-    return (LONG)InterlockedCompareExchange((LONG*)(ptr), (LONG)newval,
-                                            (LONG)oldval) == (LONG)oldval;
+    return (LONG)InterlockedCompareExchange((LONG *)(ptr), (LONG)newval,
+                                            (LONG)oldval) == (LONG)oldval;
}

#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

-int CPLAtomicAdd(volatile int* ptr, int increment)
+int CPLAtomicAdd(volatile int *ptr, int increment)
{
-    int temp = increment;
-    __asm__ __volatile__("lock; xaddl %0,%1"
-                         : "+r" (temp), "+m" (*ptr)
-                         : : "memory");
-    return temp + increment;
+    int temp = increment;
+    __asm__ __volatile__("lock; xaddl %0,%1"
+                         : "+r"(temp), "+m"(*ptr)
+                         :
+                         : "memory");
+    return temp + increment;
}

-int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval)
+int CPLAtomicCompareAndExchange(volatile int *ptr, int oldval, int newval)
{
    unsigned char ret;

-    __asm__ __volatile__(
-        " lock; cmpxchgl %2,%1\n"
-        " sete %0\n"
-        : "=q" (ret), "=m" (*ptr)
-        : "r" (newval), "m" (*ptr), "a" (oldval)
-        : "memory");
+    __asm__ __volatile__(" lock; cmpxchgl %2,%1\n"
+                         " sete %0\n"
+                         : "=q"(ret), "=m"(*ptr)
+                         : "r"(newval), "m"(*ptr), "a"(oldval)
+                         : "memory");

    return static_cast<int>(ret);
}
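The two assembly sequences work as follows: `lock xaddl` atomically exchanges and adds, leaving the old value in `temp`, so `temp + increment` is the post-add value; `lock cmpxchgl` compares `*ptr` against EAX (`oldval`), stores `newval` on a match, and `sete` captures the resulting ZF flag. For readers unfamiliar with the asm, the same behavior can be expressed with GCC's newer __atomic builtins; a sketch assuming sequentially consistent ordering, not what this file uses:

int CPLAtomicAddViaBuiltins(volatile int *ptr, int increment)
{
    // __atomic_add_fetch() returns the new value, like xadd plus increment.
    return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
}

int CPLAtomicCasViaBuiltins(volatile int *ptr, int oldval, int newval)
{
    // True iff the swap happened, like the cmpxchg/sete pair above.
    return __atomic_compare_exchange_n(ptr, &oldval, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}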
@@ -97,46 +97,46 @@ int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval)
// We use a ./configure test to determine whether these builtins are available,
// as it appears that the GCC 4.1 version used on Debian Etch is broken when
// linking such instructions.
-int CPLAtomicAdd( volatile int* ptr, int increment )
+int CPLAtomicAdd(volatile int *ptr, int increment)
{
-    if( increment > 0 )
-        return __sync_add_and_fetch( ptr, increment );
+    if (increment > 0)
+        return __sync_add_and_fetch(ptr, increment);

-    return __sync_sub_and_fetch( ptr, -increment );
+    return __sync_sub_and_fetch(ptr, -increment);
}

-int CPLAtomicCompareAndExchange( volatile int* ptr, int oldval, int newval )
+int CPLAtomicCompareAndExchange(volatile int *ptr, int oldval, int newval)
{
-    return __sync_bool_compare_and_swap( ptr, oldval, newval );
+    return __sync_bool_compare_and_swap(ptr, oldval, newval);
}
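The ./configure probe mentioned in the comment presumably boils down to compiling and linking a snippet along these lines; this is a sketch of the idea, not the literal test from GDAL's configure script. Both __sync_add_and_fetch() and __sync_sub_and_fetch() return the updated value, matching the CPLAtomicAdd() contract.

/* Link test: only succeeds where the __sync builtins resolve to real code. */
int main(void)
{
    int n = 0;
    __sync_add_and_fetch(&n, 1);
    __sync_bool_compare_and_swap(&n, 1, 2);
    return 0;
}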

#elif defined(__MACH__) && defined(__APPLE__)

#include <libkern/OSAtomic.h>

-int CPLAtomicAdd(volatile int* ptr, int increment)
+int CPLAtomicAdd(volatile int *ptr, int increment)
{
-    return OSAtomicAdd32(increment, (int*)(ptr));
+    return OSAtomicAdd32(increment, (int *)(ptr));
}

-int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval)
+int CPLAtomicCompareAndExchange(volatile int *ptr, int oldval, int newval)
{
-    return OSAtomicCompareAndSwap32(oldval, newval, (int*)(ptr));
+    return OSAtomicCompareAndSwap32(oldval, newval, (int *)(ptr));
}
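Note the asymmetry with the Windows branches: OSAtomicAdd32() already returns the updated value, so no correction term is needed, and OSAtomicCompareAndSwap32() takes the comparand first. Whichever branch gets compiled, callers observe the same contract; a hedged single-threaded sketch of it (the asserts are illustrative):

#include <cassert>

void IllustrateContract()
{
    volatile int n = 5;
    assert(CPLAtomicAdd(&n, 3) == 8);                    // returns new value
    assert(CPLAtomicCompareAndExchange(&n, 8, 42) != 0); // swap succeeded
    assert(CPLAtomicCompareAndExchange(&n, 8, 0) == 0);  // comparand stale
    assert(n == 42);
}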

#elif !defined(CPL_MULTIPROC_PTHREAD)

#warning "Needs real lock API to implement properly atomic increment"

// Dummy implementation.
-int CPLAtomicAdd(volatile int* ptr, int increment)
+int CPLAtomicAdd(volatile int *ptr, int increment)
{
    (*ptr) += increment;
    return *ptr;
}

-int CPLAtomicCompareAndExchange( volatile int* ptr, int oldval, int newval )
+int CPLAtomicCompareAndExchange(volatile int *ptr, int oldval, int newval)
{
-    if( *ptr == oldval )
+    if (*ptr == oldval)
    {
        *ptr = newval;
        return TRUE;
@@ -151,17 +151,17 @@ int CPLAtomicCompareAndExchange( volatile int* ptr, int oldval, int newval )
static CPLLock *hAtomicOpLock = nullptr;

// Slow, but safe, implementation using a mutex.
-int CPLAtomicAdd(volatile int* ptr, int increment)
+int CPLAtomicAdd(volatile int *ptr, int increment)
{
    CPLLockHolderD(&hAtomicOpLock, LOCK_SPIN);
    (*ptr) += increment;
    return *ptr;
}

-int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval)
+int CPLAtomicCompareAndExchange(volatile int *ptr, int oldval, int newval)
{
    CPLLockHolderD(&hAtomicOpLock, LOCK_SPIN);
-    if( *ptr == oldval )
+    if (*ptr == oldval)
    {
        *ptr = newval;
        return TRUE;
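Finally, a typical consumer of this API is a thread-safe reference count; the sketch below is illustrative usage, not code from this file:

struct MyRefCountedObject  // hypothetical type
{
    volatile int nRefCount;
};

void Ref(MyRefCountedObject *poObj)
{
    CPLAtomicAdd(&poObj->nRefCount, 1);
}

bool Unref(MyRefCountedObject *poObj)
{
    // CPLAtomicAdd() returns the post-decrement value in every branch above.
    return CPLAtomicAdd(&poObj->nRefCount, -1) == 0;  // true => last reference
}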