/*-------------------------------------------------------------------------
 *
 * generic-gcc.h
 *	  Atomic operations, implemented using gcc (or compatible) intrinsics.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES:
 *
 * Documentation:
 * * Legacy __sync Built-in Functions for Atomic Memory Access
 *   http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
 * * Built-in functions for memory model aware atomic operations
 *   http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
 *
 * src/include/port/atomics/generic-gcc.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#error "should be included via atomics.h"
#endif

/*
 * An empty asm block should be a sufficient compiler barrier.
 */
#define pg_compiler_barrier_impl()	__asm__ __volatile__("" ::: "memory")
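
/*
 * Example sketch: a compiler barrier only constrains compiler-level
 * reordering; it emits no fence instruction, so the CPU may still reorder
 * the stores as seen from other processors.  (Variables here are
 * hypothetical; use pg_memory_barrier_impl() when hardware ordering is
 * required.)
 *
 *		data = compute();
 *		pg_compiler_barrier_impl();		// compiler may not move stores across
 *		data_ready = true;				// CPU may still reorder these stores
 */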

/*
 * If we're on GCC 4.1.0 or higher, we should be able to get a memory
 * barrier out of this compiler built-in.  But we prefer to rely on platform
 * specific definitions where possible, and use this only as a fallback.
 */
#if !defined(pg_memory_barrier_impl)
#	if defined(HAVE_GCC__ATOMIC_INT32_CAS)
#		define pg_memory_barrier_impl()	__atomic_thread_fence(__ATOMIC_SEQ_CST)
#	elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#		define pg_memory_barrier_impl()	__sync_synchronize()
#	endif
#endif	/* !defined(pg_memory_barrier_impl) */

#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
#	define pg_read_barrier_impl()	__atomic_thread_fence(__ATOMIC_ACQUIRE)
#endif

#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
#	define pg_write_barrier_impl()	__atomic_thread_fence(__ATOMIC_RELEASE)
#endif
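
/*
 * Example sketch of how these barriers pair up across two processes
 * (variables are hypothetical):
 *
 *		// producer
 *		data = 42;
 *		pg_write_barrier_impl();	// data store visible before flag store
 *		flag = 1;
 *
 *		// consumer
 *		while (flag == 0)
 *			;
 *		pg_read_barrier_impl();		// flag load ordered before data load
 *		use(data);
 */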


#ifdef HAVE_ATOMICS

/* generic gcc based atomic flag implementation */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) \
	&& (defined(HAVE_GCC__SYNC_INT32_TAS) || defined(HAVE_GCC__SYNC_CHAR_TAS))

#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef struct pg_atomic_flag
{
	/*
	 * If we have a choice, use int-width TAS, because that is more efficient
	 * and/or more reliably implemented on most non-Intel platforms.  (Note
	 * that this code isn't used on x86[_64]; see arch-x86.h for that.)
	 */
#ifdef HAVE_GCC__SYNC_INT32_TAS
	volatile int value;
#else
	volatile char value;
#endif
} pg_atomic_flag;

#endif	/* !ATOMIC_FLAG_SUPPORT && (SYNC_INT32_TAS || SYNC_CHAR_TAS) */

/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
	&& (defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS))

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
	volatile uint32 value;
} pg_atomic_uint32;

#endif	/* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */

/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
	&& !defined(PG_DISABLE_64_BIT_ATOMICS) \
	&& (defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS))

#define PG_HAVE_ATOMIC_U64_SUPPORT

typedef struct pg_atomic_uint64
{
	volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;

#endif	/* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
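
/*
 * The explicit 8-byte alignment above matters: on some 32-bit targets a
 * uint64 is otherwise only 4-byte aligned, and 8-byte __atomic/__sync
 * operations on a misaligned address may be slow or may fall back to a
 * lock-based implementation (e.g. via libatomic).
 */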

#ifdef PG_HAVE_ATOMIC_FLAG_SUPPORT

#if defined(HAVE_GCC__SYNC_CHAR_TAS) || defined(HAVE_GCC__SYNC_INT32_TAS)

#ifndef PG_HAVE_ATOMIC_TEST_SET_FLAG
#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* NB: only an acquire barrier, not a full one */
	/* some platforms only support a 1 here */
	return __sync_lock_test_and_set(&ptr->value, 1) == 0;
}
#endif

#endif	/* defined(HAVE_GCC__SYNC_*_TAS) */

#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return ptr->value == 0;
}
#endif

#ifndef PG_HAVE_ATOMIC_CLEAR_FLAG
#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	__sync_lock_release(&ptr->value);
}
#endif

#ifndef PG_HAVE_ATOMIC_INIT_FLAG
#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_clear_flag_impl(ptr);
}
#endif

#endif	/* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
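
/*
 * Example sketch: the flag API above is the building block for a simple
 * test-and-set spinlock (names are hypothetical):
 *
 *		pg_atomic_flag lock;
 *
 *		pg_atomic_init_flag_impl(&lock);
 *
 *		// acquire: TAS returns true iff the flag was previously clear
 *		while (!pg_atomic_test_set_flag_impl(&lock))
 *			;							// spin
 *		// ... critical section ...
 *		pg_atomic_clear_flag_impl(&lock);	// release (release barrier)
 */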

/* prefer __atomic, it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	/* FIXME: we can probably use a lower consistency model */
	return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif
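
/*
 * Example sketch: compare-exchange is normally used in a retry loop.  On
 * failure, *expected is updated to the value actually found, so the loop
 * needs no separate re-read.  A hypothetical atomic fetch-and-or:
 *
 *		uint32 old = ptr->value;	// initial read
 *		while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | mask))
 *			;						// 'old' now holds the current value
 */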

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	bool	ret;
	uint32	current;

	/* emulate __atomic semantics: report the value actually found */
	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
	ret = current == *expected;
	*expected = current;
	return ret;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	return __sync_fetch_and_add(&ptr->value, add_);
}
#endif
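
/*
 * Example sketch: fetch-add returns the value *before* the addition, and a
 * negative 'add_' subtracts (names are hypothetical):
 *
 *		uint32 old = pg_atomic_fetch_add_u32_impl(&counter, 1);
 *		// 'old' is the pre-increment value; counter is now old + 1
 *		(void) pg_atomic_fetch_add_u32_impl(&counter, -1);
 */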


#if !defined(PG_DISABLE_64_BIT_ATOMICS)

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	bool	ret;
	uint64	current;

	/* emulate __atomic semantics: report the value actually found */
	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
	ret = current == *expected;
	*expected = current;
	return ret;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#endif	/* !defined(PG_DISABLE_64_BIT_ATOMICS) */

#endif	/* defined(HAVE_ATOMICS) */