1 | // RUN: %clang_builtins %s %librt -o %t && %run %t |
2 | // REQUIRES: librt_has_atomic |
3 | //===-- atomic_test.c - Test support functions for atomic operations ------===// |
4 | // |
5 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
6 | // See https://llvm.org/LICENSE.txt for license information. |
7 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
8 | // |
9 | //===----------------------------------------------------------------------===// |
10 | // |
11 | // This file performs some simple testing of the support functions for the |
12 | // atomic builtins. All tests are single-threaded, so this is only a sanity |
13 | // check. |
14 | // |
15 | //===----------------------------------------------------------------------===// |
16 | |
17 | #include <stdbool.h> |
18 | #include <stdint.h> |
19 | #include <stdio.h> |
20 | #include <stdlib.h> |
21 | #include <string.h> |
22 | #undef NDEBUG |
23 | #include <assert.h> |
24 | |
25 | // We directly test the library atomic functions, not using the C builtins. This |
26 | // should avoid confounding factors, ensuring that we actually test the |
27 | // functions themselves, regardless of how the builtins are lowered. We need to |
28 | // use asm labels because we can't redeclare the builtins. |
29 | // Note: we need to prepend an underscore to this name for e.g. macOS. |
30 | #define _STRINGIFY(x) #x |
31 | #define STRINGIFY(x) _STRINGIFY(x) |
32 | #define EXTERNAL_NAME(name) asm(STRINGIFY(__USER_LABEL_PREFIX__) #name) |
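// For example, on a target where __USER_LABEL_PREFIX__ is `_` (e.g. macOS),
// the declaration of __atomic_is_lock_free_c below expands to roughly:
//
//   bool __atomic_is_lock_free_c(size_t size, void *ptr)
//       asm("___atomic_is_lock_free");
//
// so calls to the wrapper bind to the library symbol __atomic_is_lock_free
// rather than to the compiler builtin of the same name.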
33 | |
34 | bool __atomic_is_lock_free_c(size_t size, void *ptr) |
35 | EXTERNAL_NAME(__atomic_is_lock_free); |
36 | |
37 | void __atomic_load_c(int size, void *src, void *dest, |
38 | int model) EXTERNAL_NAME(__atomic_load); |
39 | |
40 | uint8_t __atomic_load_1(uint8_t *src, int model); |
41 | uint16_t __atomic_load_2(uint16_t *src, int model); |
42 | uint32_t __atomic_load_4(uint32_t *src, int model); |
43 | uint64_t __atomic_load_8(uint64_t *src, int model); |
44 | |
45 | void __atomic_store_c(int size, void *dest, const void *src, |
46 | int model) EXTERNAL_NAME(__atomic_store); |
47 | |
48 | void __atomic_store_1(uint8_t *dest, uint8_t val, int model); |
49 | void __atomic_store_2(uint16_t *dest, uint16_t val, int model); |
50 | void __atomic_store_4(uint32_t *dest, uint32_t val, int model); |
51 | void __atomic_store_8(uint64_t *dest, uint64_t val, int model); |
52 | |
53 | void __atomic_exchange_c(int size, void *ptr, const void *val, void *old, |
54 | int model) EXTERNAL_NAME(__atomic_exchange); |
55 | |
56 | uint8_t __atomic_exchange_1(uint8_t *dest, uint8_t val, int model); |
57 | uint16_t __atomic_exchange_2(uint16_t *dest, uint16_t val, int model); |
58 | uint32_t __atomic_exchange_4(uint32_t *dest, uint32_t val, int model); |
59 | uint64_t __atomic_exchange_8(uint64_t *dest, uint64_t val, int model); |
60 | |
61 | int __atomic_compare_exchange_c(int size, void *ptr, void *expected, |
62 | const void *desired, int success, int failure) |
63 | EXTERNAL_NAME(__atomic_compare_exchange); |
64 | |
65 | bool __atomic_compare_exchange_1(uint8_t *ptr, uint8_t *expected, |
66 | uint8_t desired, int success, int failure); |
67 | bool __atomic_compare_exchange_2(uint16_t *ptr, uint16_t *expected, |
68 | uint16_t desired, int success, int failure); |
69 | bool __atomic_compare_exchange_4(uint32_t *ptr, uint32_t *expected, |
70 | uint32_t desired, int success, int failure); |
71 | bool __atomic_compare_exchange_8(uint64_t *ptr, uint64_t *expected, |
72 | uint64_t desired, int success, int failure); |
73 | |
74 | uint8_t __atomic_fetch_add_1(uint8_t *ptr, uint8_t val, int model); |
75 | uint16_t __atomic_fetch_add_2(uint16_t *ptr, uint16_t val, int model); |
76 | uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model); |
77 | uint64_t __atomic_fetch_add_8(uint64_t *ptr, uint64_t val, int model); |
78 | |
79 | uint8_t __atomic_fetch_sub_1(uint8_t *ptr, uint8_t val, int model); |
80 | uint16_t __atomic_fetch_sub_2(uint16_t *ptr, uint16_t val, int model); |
81 | uint32_t __atomic_fetch_sub_4(uint32_t *ptr, uint32_t val, int model); |
82 | uint64_t __atomic_fetch_sub_8(uint64_t *ptr, uint64_t val, int model); |
83 | |
84 | uint8_t __atomic_fetch_and_1(uint8_t *ptr, uint8_t val, int model); |
85 | uint16_t __atomic_fetch_and_2(uint16_t *ptr, uint16_t val, int model); |
86 | uint32_t __atomic_fetch_and_4(uint32_t *ptr, uint32_t val, int model); |
87 | uint64_t __atomic_fetch_and_8(uint64_t *ptr, uint64_t val, int model); |
88 | |
89 | uint8_t __atomic_fetch_or_1(uint8_t *ptr, uint8_t val, int model); |
90 | uint16_t __atomic_fetch_or_2(uint16_t *ptr, uint16_t val, int model); |
91 | uint32_t __atomic_fetch_or_4(uint32_t *ptr, uint32_t val, int model); |
92 | uint64_t __atomic_fetch_or_8(uint64_t *ptr, uint64_t val, int model); |
93 | |
94 | uint8_t __atomic_fetch_xor_1(uint8_t *ptr, uint8_t val, int model); |
95 | uint16_t __atomic_fetch_xor_2(uint16_t *ptr, uint16_t val, int model); |
96 | uint32_t __atomic_fetch_xor_4(uint32_t *ptr, uint32_t val, int model); |
97 | uint64_t __atomic_fetch_xor_8(uint64_t *ptr, uint64_t val, int model); |
98 | |
99 | uint8_t __atomic_fetch_nand_1(uint8_t *ptr, uint8_t val, int model); |
100 | uint16_t __atomic_fetch_nand_2(uint16_t *ptr, uint16_t val, int model); |
101 | uint32_t __atomic_fetch_nand_4(uint32_t *ptr, uint32_t val, int model); |
102 | uint64_t __atomic_fetch_nand_8(uint64_t *ptr, uint64_t val, int model); |
103 | |
104 | // We conditionally test the *_16 atomic function variants based on the same |
105 | // condition that compiler_rt (atomic.c) uses to conditionally generate them. |
// Currently atomic.c checks whether __SIZEOF_INT128__ is defined (which can
// be the case even on 32-bit platforms, via -fforce-enable-int128), rather
// than using CRT_HAS_128BIT.
109 | |
110 | #ifdef __SIZEOF_INT128__ |
111 | #define TEST_16 |
112 | #endif |
113 | |
114 | #ifdef TEST_16 |
115 | typedef __uint128_t uint128_t; |
116 | typedef uint128_t maxuint_t; |
117 | uint128_t __atomic_load_16(uint128_t *src, int model); |
118 | void __atomic_store_16(uint128_t *dest, uint128_t val, int model); |
119 | uint128_t __atomic_exchange_16(uint128_t *dest, uint128_t val, int model); |
120 | bool __atomic_compare_exchange_16(uint128_t *ptr, uint128_t *expected, |
121 | uint128_t desired, int success, int failure); |
122 | uint128_t __atomic_fetch_add_16(uint128_t *ptr, uint128_t val, int model); |
123 | uint128_t __atomic_fetch_sub_16(uint128_t *ptr, uint128_t val, int model); |
124 | uint128_t __atomic_fetch_and_16(uint128_t *ptr, uint128_t val, int model); |
125 | uint128_t __atomic_fetch_or_16(uint128_t *ptr, uint128_t val, int model); |
126 | uint128_t __atomic_fetch_xor_16(uint128_t *ptr, uint128_t val, int model); |
127 | uint128_t __atomic_fetch_nand_16(uint128_t *ptr, uint128_t val, int model); |
128 | #else |
129 | typedef uint64_t maxuint_t; |
130 | #endif |
131 | |
132 | #define U8(value) ((uint8_t)(value)) |
133 | #define U16(value) ((uint16_t)(value)) |
134 | #define U32(value) ((uint32_t)(value)) |
135 | #define U64(value) ((uint64_t)(value)) |
136 | |
137 | #ifdef TEST_16 |
138 | #define V ((((uint128_t)0x4243444546474849) << 64) | 0x4a4b4c4d4e4f5051) |
139 | #define ONES ((((uint128_t)0x0101010101010101) << 64) | 0x0101010101010101) |
140 | #else |
#define V 0x4243444546474849ULL
#define ONES 0x0101010101010101ULL
143 | #endif |
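// Every byte of V is distinct, so a partially-updated or byte-shuffled result
// is caught by the equality checks below; ONES sets the low bit of each byte
// and serves as the operand for the fetch-and-modify tests.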
144 | |
145 | #define LEN(array) (sizeof(array) / sizeof(array[0])) |
146 | |
147 | __attribute__((aligned(16))) static const char data[] = { |
148 | 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, |
149 | 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, |
150 | 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, |
151 | 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, |
152 | }; |
153 | |
154 | uint8_t a8, b8; |
155 | uint16_t a16, b16; |
156 | uint32_t a32, b32; |
157 | uint64_t a64, b64; |
158 | #ifdef TEST_16 |
159 | uint128_t a128, b128; |
160 | #endif |
161 | |
162 | void set_a_values(maxuint_t value) { |
163 | a8 = U8(value); |
164 | a16 = U16(value); |
165 | a32 = U32(value); |
166 | a64 = U64(value); |
167 | #ifdef TEST_16 |
168 | a128 = value; |
169 | #endif |
170 | } |
171 | |
172 | void set_b_values(maxuint_t value) { |
173 | b8 = U8(value); |
174 | b16 = U16(value); |
175 | b32 = U32(value); |
176 | b64 = U64(value); |
177 | #ifdef TEST_16 |
178 | b128 = value; |
179 | #endif |
180 | } |
181 | |
182 | void test_loads(void) { |
183 | static int atomic_load_models[] = { |
184 | __ATOMIC_RELAXED, |
185 | __ATOMIC_CONSUME, |
186 | __ATOMIC_ACQUIRE, |
187 | __ATOMIC_SEQ_CST, |
188 | }; |
189 | |
190 | for (int m = 0; m < LEN(atomic_load_models); m++) { |
191 | int model = atomic_load_models[m]; |
192 | |
193 | // Test with aligned data. |
194 | for (int n = 1; n <= LEN(data); n++) { |
195 | __attribute__((aligned(16))) char dst[LEN(data)] = {0}; |
196 | __atomic_load_c(n, data, dst, model); |
      if (memcmp(dst, data, n) != 0)
198 | abort(); |
199 | } |
200 | |
201 | // Test with unaligned data. |
202 | for (int n = 1; n < LEN(data); n++) { |
203 | __attribute__((aligned(16))) char dst[LEN(data)] = {0}; |
204 | __atomic_load_c(n, data + 1, dst + 1, model); |
      if (memcmp(dst + 1, data + 1, n) != 0)
206 | abort(); |
207 | } |
208 | |
209 | set_a_values(V + m); |
    if (__atomic_load_1(&a8, model) != U8(V + m))
      abort();
    if (__atomic_load_2(&a16, model) != U16(V + m))
      abort();
    if (__atomic_load_4(&a32, model) != U32(V + m))
      abort();
    if (__atomic_load_8(&a64, model) != U64(V + m))
      abort();
#ifdef TEST_16
    if (__atomic_load_16(&a128, model) != V + m)
220 | abort(); |
221 | #endif |
222 | } |
223 | } |
224 | |
225 | void test_stores(void) { |
226 | static int atomic_store_models[] = { |
227 | __ATOMIC_RELAXED, |
228 | __ATOMIC_RELEASE, |
229 | __ATOMIC_SEQ_CST, |
230 | }; |
231 | |
232 | for (int m = 0; m < LEN(atomic_store_models); m++) { |
233 | int model = atomic_store_models[m]; |
234 | |
235 | // Test with aligned data. |
236 | for (int n = 1; n <= LEN(data); n++) { |
237 | __attribute__((aligned(16))) char dst[LEN(data)]; |
238 | __atomic_store_c(n, dst, data, model); |
      if (memcmp(data, dst, n) != 0)
240 | abort(); |
241 | } |
242 | |
243 | // Test with unaligned data. |
244 | for (int n = 1; n < LEN(data); n++) { |
245 | __attribute__((aligned(16))) char dst[LEN(data)]; |
246 | __atomic_store_c(n, dst + 1, data + 1, model); |
      if (memcmp(data + 1, dst + 1, n) != 0)
248 | abort(); |
249 | } |
250 | |
    __atomic_store_1(&a8, U8(V + m), model);
    if (a8 != U8(V + m))
      abort();
    __atomic_store_2(&a16, U16(V + m), model);
    if (a16 != U16(V + m))
      abort();
    __atomic_store_4(&a32, U32(V + m), model);
    if (a32 != U32(V + m))
      abort();
    __atomic_store_8(&a64, U64(V + m), model);
    if (a64 != U64(V + m))
      abort();
#ifdef TEST_16
    __atomic_store_16(&a128, V + m, model);
265 | if (a128 != V + m) |
266 | abort(); |
267 | #endif |
268 | } |
269 | } |
270 | |
271 | void test_exchanges(void) { |
272 | static int atomic_exchange_models[] = { |
273 | __ATOMIC_RELAXED, |
274 | __ATOMIC_ACQUIRE, |
275 | __ATOMIC_RELEASE, |
276 | __ATOMIC_ACQ_REL, |
277 | __ATOMIC_SEQ_CST, |
278 | }; |
279 | |
280 | set_a_values(V); |
281 | |
282 | for (int m = 0; m < LEN(atomic_exchange_models); m++) { |
283 | int model = atomic_exchange_models[m]; |
284 | |
285 | // Test with aligned data. |
286 | for (int n = 1; n <= LEN(data); n++) { |
287 | __attribute__((aligned(16))) char dst[LEN(data)]; |
288 | __attribute__((aligned(16))) char old[LEN(data)]; |
289 | for (int i = 0; i < LEN(dst); i++) |
290 | dst[i] = i + m; |
291 | __atomic_exchange_c(n, dst, data, old, model); |
292 | for (int i = 0; i < n; i++) { |
293 | if (dst[i] != 0x10 + i || old[i] != i + m) |
294 | abort(); |
295 | } |
296 | } |
297 | |
298 | // Test with unaligned data. |
299 | for (int n = 1; n < LEN(data); n++) { |
300 | __attribute__((aligned(16))) char dst[LEN(data)]; |
301 | __attribute__((aligned(16))) char old[LEN(data)]; |
302 | for (int i = 1; i < LEN(dst); i++) |
303 | dst[i] = i - 1 + m; |
304 | __atomic_exchange_c(n, dst + 1, data + 1, old + 1, model); |
305 | for (int i = 1; i < n; i++) { |
306 | if (dst[i] != 0x10 + i || old[i] != i - 1 + m) |
307 | abort(); |
308 | } |
309 | } |
310 | |
    if (__atomic_exchange_1(&a8, U8(V + m + 1), model) != U8(V + m))
      abort();
    if (__atomic_exchange_2(&a16, U16(V + m + 1), model) != U16(V + m))
      abort();
    if (__atomic_exchange_4(&a32, U32(V + m + 1), model) != U32(V + m))
      abort();
    if (__atomic_exchange_8(&a64, U64(V + m + 1), model) != U64(V + m))
      abort();
#ifdef TEST_16
    if (__atomic_exchange_16(&a128, V + m + 1, model) != V + m)
321 | abort(); |
322 | #endif |
323 | } |
324 | } |
325 | |
326 | void test_compare_exchanges(void) { |
327 | static int atomic_compare_exchange_models[] = { |
328 | __ATOMIC_RELAXED, |
329 | __ATOMIC_CONSUME, |
330 | __ATOMIC_ACQUIRE, |
331 | __ATOMIC_SEQ_CST, |
332 | __ATOMIC_RELEASE, |
333 | __ATOMIC_ACQ_REL, |
334 | }; |
335 | |
336 | for (int m1 = 0; m1 < LEN(atomic_compare_exchange_models); m1++) { |
337 | // Skip the last two: __ATOMIC_RELEASE and __ATOMIC_ACQ_REL. |
338 | // See <http://wg21.link/p0418> for details. |
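    // (The failure path of a compare-exchange performs only a load, so
    // release orderings are not meaningful for it.)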
339 | for (int m2 = 0; m2 < LEN(atomic_compare_exchange_models) - 2; m2++) { |
340 | int m_succ = atomic_compare_exchange_models[m1]; |
341 | int m_fail = atomic_compare_exchange_models[m2]; |
342 | |
343 | // Test with aligned data. |
344 | for (int n = 1; n <= LEN(data); n++) { |
345 | __attribute__((aligned(16))) char dst[LEN(data)] = {0}; |
346 | __attribute__((aligned(16))) char exp[LEN(data)] = {0}; |
347 | if (!__atomic_compare_exchange_c(n, dst, exp, data, m_succ, m_fail)) |
348 | abort(); |
        if (memcmp(dst, data, n) != 0)
350 | abort(); |
351 | if (__atomic_compare_exchange_c(n, dst, exp, data, m_succ, m_fail)) |
352 | abort(); |
        if (memcmp(exp, data, n) != 0)
354 | abort(); |
355 | } |
356 | |
357 | // Test with unaligned data. |
358 | for (int n = 1; n < LEN(data); n++) { |
359 | __attribute__((aligned(16))) char dst[LEN(data)] = {0}; |
360 | __attribute__((aligned(16))) char exp[LEN(data)] = {0}; |
361 | if (!__atomic_compare_exchange_c(n, dst + 1, exp + 1, data + 1, |
362 | m_succ, m_fail)) |
363 | abort(); |
        if (memcmp(dst + 1, data + 1, n) != 0)
365 | abort(); |
366 | if (__atomic_compare_exchange_c(n, dst + 1, exp + 1, data + 1, m_succ, |
367 | m_fail)) |
368 | abort(); |
        if (memcmp(exp + 1, data + 1, n) != 0)
370 | abort(); |
371 | } |
372 | |
373 | set_a_values(ONES); |
374 | set_b_values(ONES * 2); |
375 | |
      if (__atomic_compare_exchange_1(&a8, &b8, U8(V + m1), m_succ, m_fail))
        abort();
      if (a8 != U8(ONES) || b8 != U8(ONES))
        abort();
      if (!__atomic_compare_exchange_1(&a8, &b8, U8(V + m1), m_succ, m_fail))
381 | abort(); |
382 | if (a8 != U8(V + m1) || b8 != U8(ONES)) |
383 | abort(); |
384 | |
      if (__atomic_compare_exchange_2(&a16, &b16, U16(V + m1), m_succ, m_fail))
        abort();
      if (a16 != U16(ONES) || b16 != U16(ONES))
        abort();
      if (!__atomic_compare_exchange_2(&a16, &b16, U16(V + m1), m_succ, m_fail))
390 | abort(); |
391 | if (a16 != U16(V + m1) || b16 != U16(ONES)) |
392 | abort(); |
393 | |
      if (__atomic_compare_exchange_4(&a32, &b32, U32(V + m1), m_succ, m_fail))
        abort();
      if (a32 != U32(ONES) || b32 != U32(ONES))
        abort();
      if (!__atomic_compare_exchange_4(&a32, &b32, U32(V + m1), m_succ, m_fail))
399 | abort(); |
400 | if (a32 != U32(V + m1) || b32 != U32(ONES)) |
401 | abort(); |
402 | |
      if (__atomic_compare_exchange_8(&a64, &b64, U64(V + m1), m_succ, m_fail))
        abort();
      if (a64 != U64(ONES) || b64 != U64(ONES))
        abort();
      if (!__atomic_compare_exchange_8(&a64, &b64, U64(V + m1), m_succ, m_fail))
408 | abort(); |
409 | if (a64 != U64(V + m1) || b64 != U64(ONES)) |
410 | abort(); |
411 | |
412 | #ifdef TEST_16 |
      if (__atomic_compare_exchange_16(&a128, &b128, V + m1, m_succ, m_fail))
        abort();
      if (a128 != ONES || b128 != ONES)
        abort();
      if (!__atomic_compare_exchange_16(&a128, &b128, V + m1, m_succ, m_fail))
418 | abort(); |
419 | if (a128 != V + m1 || b128 != ONES) |
420 | abort(); |
421 | #endif |
422 | } |
423 | } |
424 | } |
425 | |
426 | void test_fetch_op(void) { |
427 | static int atomic_fetch_models[] = { |
428 | __ATOMIC_RELAXED, |
429 | __ATOMIC_CONSUME, |
430 | __ATOMIC_ACQUIRE, |
431 | __ATOMIC_RELEASE, |
432 | __ATOMIC_ACQ_REL, |
433 | __ATOMIC_SEQ_CST, |
434 | }; |
435 | |
436 | for (int m = 0; m < LEN(atomic_fetch_models); m++) { |
437 | int model = atomic_fetch_models[m]; |
438 | |
439 | // Fetch add. |
440 | |
441 | set_a_values(V + m); |
442 | set_b_values(0); |
    b8 = __atomic_fetch_add_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8(V + m + ONES))
      abort();
    b16 = __atomic_fetch_add_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16(V + m + ONES))
      abort();
    b32 = __atomic_fetch_add_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32(V + m + ONES))
      abort();
    b64 = __atomic_fetch_add_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64(V + m + ONES))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_add_16(&a128, ONES, model);
457 | if (b128 != V + m || a128 != V + m + ONES) |
458 | abort(); |
459 | #endif |
460 | |
461 | // Fetch sub. |
462 | |
463 | set_a_values(V + m); |
464 | set_b_values(0); |
    b8 = __atomic_fetch_sub_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8(V + m - ONES))
      abort();
    b16 = __atomic_fetch_sub_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16(V + m - ONES))
      abort();
    b32 = __atomic_fetch_sub_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32(V + m - ONES))
      abort();
    b64 = __atomic_fetch_sub_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64(V + m - ONES))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_sub_16(&a128, ONES, model);
479 | if (b128 != V + m || a128 != V + m - ONES) |
480 | abort(); |
481 | #endif |
482 | |
483 | // Fetch and. |
484 | |
485 | set_a_values(V + m); |
486 | set_b_values(0); |
    b8 = __atomic_fetch_and_1(&a8, U8(V + m), model);
    if (b8 != U8(V + m) || a8 != U8(V + m))
      abort();
    b16 = __atomic_fetch_and_2(&a16, U16(V + m), model);
    if (b16 != U16(V + m) || a16 != U16(V + m))
      abort();
    b32 = __atomic_fetch_and_4(&a32, U32(V + m), model);
    if (b32 != U32(V + m) || a32 != U32(V + m))
      abort();
    b64 = __atomic_fetch_and_8(&a64, U64(V + m), model);
    if (b64 != U64(V + m) || a64 != U64(V + m))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_and_16(&a128, V + m, model);
501 | if (b128 != V + m || a128 != V + m) |
502 | abort(); |
503 | #endif |
504 | |
505 | // Fetch or. |
506 | |
507 | set_a_values(V + m); |
508 | set_b_values(0); |
    b8 = __atomic_fetch_or_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8((V + m) | ONES))
      abort();
    b16 = __atomic_fetch_or_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16((V + m) | ONES))
      abort();
    b32 = __atomic_fetch_or_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32((V + m) | ONES))
      abort();
    b64 = __atomic_fetch_or_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64((V + m) | ONES))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_or_16(&a128, ONES, model);
523 | if (b128 != V + m || a128 != ((V + m) | ONES)) |
524 | abort(); |
525 | #endif |
526 | |
527 | // Fetch xor. |
528 | |
529 | set_a_values(V + m); |
530 | set_b_values(0); |
    b8 = __atomic_fetch_xor_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8((V + m) ^ ONES))
      abort();
    b16 = __atomic_fetch_xor_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16((V + m) ^ ONES))
      abort();
    b32 = __atomic_fetch_xor_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32((V + m) ^ ONES))
      abort();
    b64 = __atomic_fetch_xor_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64((V + m) ^ ONES))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_xor_16(&a128, ONES, model);
545 | if (b128 != (V + m) || a128 != ((V + m) ^ ONES)) |
546 | abort(); |
547 | #endif |
548 | |
549 | // Fetch nand. |
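    // (fetch_nand stores ~(*ptr & val) and returns the previously held value.)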
550 | |
551 | set_a_values(V + m); |
552 | set_b_values(0); |
    b8 = __atomic_fetch_nand_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8(~((V + m) & ONES)))
      abort();
    b16 = __atomic_fetch_nand_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16(~((V + m) & ONES)))
      abort();
    b32 = __atomic_fetch_nand_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32(~((V + m) & ONES)))
      abort();
    b64 = __atomic_fetch_nand_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64(~((V + m) & ONES)))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_nand_16(&a128, ONES, model);
567 | if (b128 != (V + m) || a128 != ~((V + m) & ONES)) |
568 | abort(); |
569 | #endif |
570 | |
    // Check overflow behavior: the adds and subs below must wrap around
    // modulo 2^N (the operands are unsigned).

    set_a_values(V + m);
    __atomic_fetch_add_1(&a8, U8(V), model);
    if (a8 != U8(V * 2 + m))
      abort();
    __atomic_fetch_sub_1(&a8, U8(V), model);
    if (a8 != U8(V + m))
      abort();
    __atomic_fetch_add_2(&a16, U16(V), model);
    if (a16 != U16(V * 2 + m))
      abort();
    __atomic_fetch_sub_2(&a16, U16(V), model);
    if (a16 != U16(V + m))
      abort();
    __atomic_fetch_add_4(&a32, U32(V), model);
    if (a32 != U32(V * 2 + m))
      abort();
    __atomic_fetch_sub_4(&a32, U32(V), model);
    if (a32 != U32(V + m))
      abort();
    __atomic_fetch_add_8(&a64, U64(V), model);
    if (a64 != U64(V * 2 + m))
      abort();
    __atomic_fetch_sub_8(&a64, U64(V), model);
    if (a64 != U64(V + m))
      abort();
#ifdef TEST_16
    __atomic_fetch_add_16(&a128, V, model);
    if (a128 != V * 2 + m)
      abort();
    __atomic_fetch_sub_16(&a128, V, model);
603 | if (a128 != V + m) |
604 | abort(); |
605 | #endif |
606 | } |
607 | } |
608 | |
609 | void test_is_lock_free(void) { |
  // The result of __atomic_is_lock_free is architecture dependent, so we only
  // check for a true return value for sizes that we know at compile time are
  // supported. If __atomic_always_lock_free() returns false for a given size,
  // we can only check that __atomic_is_lock_free() returns false for
  // unaligned values.
615 | // Note: This assumption will have to be revisited when we support an |
616 | // architecture that allows for unaligned atomics. |
617 | // XXX: Do any architectures report true for unaligned atomics? |
618 | |
619 | // All atomic.c implementations fall back to the non-specialized case for |
620 | // size=0, so despite the operation being a no-op, they still take locks and |
621 | // therefore __atomic_is_lock_free should return false. |
  assert(!__atomic_is_lock_free_c(0, NULL) && "size zero should never be lock-free");
  assert(!__atomic_is_lock_free_c(0, (void *)8) && "size zero should never be lock-free");
624 | |
625 | if (__atomic_always_lock_free(1, 0)) { |
    assert(__atomic_is_lock_free_c(1, NULL) && "aligned size=1 should always be lock-free");
    assert(__atomic_is_lock_free_c(1, (void *)1) && "aligned size=1 should always be lock-free");
628 | } |
629 | |
630 | if (__atomic_always_lock_free(2, 0)) { |
    assert(__atomic_is_lock_free_c(2, NULL) && "aligned size=2 should always be lock-free");
    assert(__atomic_is_lock_free_c(2, (void *)2) && "aligned size=2 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(2, (void *)1) && "unaligned size=2 should not be lock-free");
635 | |
636 | if (__atomic_always_lock_free(4, 0)) { |
    assert(__atomic_is_lock_free_c(4, NULL) && "aligned size=4 should always be lock-free");
    assert(__atomic_is_lock_free_c(4, (void *)4) && "aligned size=4 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(4, (void *)3) && "unaligned size=4 should not be lock-free");
  assert(!__atomic_is_lock_free_c(4, (void *)2) && "unaligned size=4 should not be lock-free");
  assert(!__atomic_is_lock_free_c(4, (void *)1) && "unaligned size=4 should not be lock-free");
643 | |
644 | if (__atomic_always_lock_free(8, 0)) { |
    assert(__atomic_is_lock_free_c(8, NULL) && "aligned size=8 should always be lock-free");
    assert(__atomic_is_lock_free_c(8, (void *)8) && "aligned size=8 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(8, (void *)7) && "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)4) && "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)2) && "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)1) && "unaligned size=8 should not be lock-free");
652 | |
653 | if (__atomic_always_lock_free(16, 0)) { |
    assert(__atomic_is_lock_free_c(16, NULL) && "aligned size=16 should always be lock-free");
    assert(__atomic_is_lock_free_c(16, (void *)16) && "aligned size=16 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(16, (void *)15) && "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)8) && "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)4) && "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)2) && "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)1) && "unaligned size=16 should not be lock-free");
662 | |
663 | // In the current implementation > 16 bytes are never lock-free: |
  assert(!__atomic_is_lock_free_c(32, NULL) && "aligned size=32 should not be lock-free");
  assert(!__atomic_is_lock_free_c(32, (void *)32) && "aligned size=32 should not be lock-free");
  assert(!__atomic_is_lock_free_c(32, (void *)31) && "unaligned size=32 should not be lock-free");
667 | |
668 | // We also don't support non-power-of-two sizes: |
  assert(!__atomic_is_lock_free_c(3, NULL) && "aligned size=3 should not be lock-free");
  assert(!__atomic_is_lock_free_c(5, NULL) && "aligned size=5 should not be lock-free");
  assert(!__atomic_is_lock_free_c(6, NULL) && "aligned size=6 should not be lock-free");
  assert(!__atomic_is_lock_free_c(7, NULL) && "aligned size=7 should not be lock-free");
  assert(!__atomic_is_lock_free_c(9, NULL) && "aligned size=9 should not be lock-free");
  assert(!__atomic_is_lock_free_c(10, NULL) && "aligned size=10 should not be lock-free");
  assert(!__atomic_is_lock_free_c(11, NULL) && "aligned size=11 should not be lock-free");
  assert(!__atomic_is_lock_free_c(12, NULL) && "aligned size=12 should not be lock-free");
  assert(!__atomic_is_lock_free_c(13, NULL) && "aligned size=13 should not be lock-free");
  assert(!__atomic_is_lock_free_c(14, NULL) && "aligned size=14 should not be lock-free");
  assert(!__atomic_is_lock_free_c(15, NULL) && "aligned size=15 should not be lock-free");
  assert(!__atomic_is_lock_free_c(17, NULL) && "aligned size=17 should not be lock-free");
681 | } |
682 | |
683 | int main() { |
684 | test_loads(); |
685 | test_stores(); |
686 | test_exchanges(); |
687 | test_compare_exchanges(); |
688 | test_fetch_op(); |
689 | test_is_lock_free(); |
690 | return 0; |
691 | } |
692 | |