1 | /* |
2 | * Copyright © 2011 Ryan Lortie |
3 | * |
4 | * This library is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU Lesser General Public |
6 | * License as published by the Free Software Foundation; either |
7 | * version 2.1 of the License, or (at your option) any later version. |
8 | * |
9 | * This library is distributed in the hope that it will be useful, but |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * Lesser General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU Lesser General Public |
15 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
16 | * |
17 | * Author: Ryan Lortie <desrt@desrt.ca> |
18 | */ |
19 | |
20 | #include "config.h" |
21 | |
22 | #include "gatomic.h" |
23 | |
24 | /** |
25 | * SECTION:atomic_operations |
26 | * @title: Atomic Operations |
27 | * @short_description: basic atomic integer and pointer operations |
28 | * @see_also: #GMutex |
29 | * |
30 | * The following is a collection of compiler macros to provide atomic |
31 | * access to integer and pointer-sized values. |
32 | * |
33 | * The macros that have 'int' in the name will operate on pointers to |
34 | * #gint and #guint. The macros with 'pointer' in the name will operate |
35 | * on pointers to any pointer-sized value, including #gsize. There is |
36 | * no support for 64bit operations on platforms with 32bit pointers |
37 | * because it is not generally possible to perform these operations |
38 | * atomically. |
39 | * |
40 | * The get, set and exchange operations for integers and pointers |
41 | * nominally operate on #gint and #gpointer, respectively. Of the |
42 | * arithmetic operations, the 'add' operation operates on (and returns) |
43 | * signed integer values (#gint and #gssize) and the 'and', 'or', and |
44 | * 'xor' operations operate on (and return) unsigned integer values |
45 | * (#guint and #gsize). |
46 | * |
47 | * All of the operations act as a full compiler and (where appropriate) |
48 | * hardware memory barrier. Acquire and release or producer and |
49 | * consumer barrier semantics are not available through this API. |
50 | * |
51 | * It is very important that all accesses to a particular integer or |
52 | * pointer be performed using only this API and that different sizes of |
53 | * operation are not mixed or used on overlapping memory regions. Never |
54 | * read or assign directly from or to a value -- always use this API. |
55 | * |
56 | * For simple reference counting purposes you should use |
57 | * g_atomic_int_inc() and g_atomic_int_dec_and_test(). Other uses that |
58 | * fall outside of simple reference counting patterns are prone to |
59 | * subtle bugs and occasionally undefined behaviour. It is also worth |
60 | * noting that since all of these operations require global |
61 | * synchronisation of the entire machine, they can be quite slow. In |
62 | * the case of performing multiple atomic operations it can often be |
63 | * faster to simply acquire a mutex lock around the critical area, |
64 | * perform the operations normally and then release the lock. |
65 | **/ |
66 | |
67 | /** |
68 | * G_ATOMIC_LOCK_FREE: |
69 | * |
70 | * This macro is defined if the atomic operations of GLib are |
71 | * implemented using real hardware atomic operations. This means that |
72 | * the GLib atomic API can be used between processes and safely mixed |
73 | * with other (hardware) atomic APIs. |
74 | * |
75 | * If this macro is not defined, the atomic operations may be |
76 | * emulated using a mutex. In that case, the GLib atomic operations are |
77 | * only atomic relative to themselves and within a single process. |
78 | **/ |
79 | |
80 | /* NOTE CAREFULLY: |
81 | * |
82 | * This file is the lowest-level part of GLib. |
83 | * |
84 | * Other lowlevel parts of GLib (threads, slice allocator, g_malloc, |
85 | * messages, etc) call into these functions and macros to get work done. |
86 | * |
87 | * As such, these functions can not call back into any part of GLib |
88 | * without risking recursion. |
89 | */ |
90 | |
91 | #ifdef G_ATOMIC_LOCK_FREE |
92 | |
93 | /* if G_ATOMIC_LOCK_FREE was defined by `meson configure` then we MUST |
94 | * implement the atomic operations in a lock-free manner. |
95 | */ |
96 | |
97 | #if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) |
98 | |
99 | /** |
100 | * g_atomic_int_get: |
101 | * @atomic: a pointer to a #gint or #guint |
102 | * |
103 | * Gets the current value of @atomic. |
104 | * |
105 | * This call acts as a full compiler and hardware |
106 | * memory barrier (before the get). |
107 | * |
108 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
109 | * the pointer passed to it should not be `volatile`. |
110 | * |
111 | * Returns: the value of the integer |
112 | * |
113 | * Since: 2.4 |
114 | **/ |
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  /* Bracketing the name above stops the same-named gatomic.h macro from
   * expanding, so a real out-of-line symbol is emitted; the call below
   * does expand the macro (the lock-free implementation). */
  return g_atomic_int_get (atomic);
}
120 | |
121 | /** |
122 | * g_atomic_int_set: |
123 | * @atomic: a pointer to a #gint or #guint |
124 | * @newval: a new value to store |
125 | * |
126 | * Sets the value of @atomic to @newval. |
127 | * |
128 | * This call acts as a full compiler and hardware |
129 | * memory barrier (after the set). |
130 | * |
131 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
132 | * the pointer passed to it should not be `volatile`. |
133 | * |
134 | * Since: 2.4 |
135 | */ |
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  /* Bracketed name suppresses the macro for this definition; the body
   * call expands the lock-free macro implementation. */
  g_atomic_int_set (atomic, newval);
}
142 | |
143 | /** |
144 | * g_atomic_int_inc: |
145 | * @atomic: a pointer to a #gint or #guint |
146 | * |
147 | * Increments the value of @atomic by 1. |
148 | * |
149 | * Think of this operation as an atomic version of `{ *atomic += 1; }`. |
150 | * |
151 | * This call acts as a full compiler and hardware memory barrier. |
152 | * |
153 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
154 | * the pointer passed to it should not be `volatile`. |
155 | * |
156 | * Since: 2.4 |
157 | **/ |
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  /* Bracketed name suppresses the macro for this definition; the body
   * call expands the lock-free macro implementation. */
  g_atomic_int_inc (atomic);
}
163 | |
164 | /** |
165 | * g_atomic_int_dec_and_test: |
166 | * @atomic: a pointer to a #gint or #guint |
167 | * |
168 | * Decrements the value of @atomic by 1. |
169 | * |
170 | * Think of this operation as an atomic version of |
171 | * `{ *atomic -= 1; return (*atomic == 0); }`. |
172 | * |
173 | * This call acts as a full compiler and hardware memory barrier. |
174 | * |
175 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
176 | * the pointer passed to it should not be `volatile`. |
177 | * |
178 | * Returns: %TRUE if the resultant value is zero |
179 | * |
180 | * Since: 2.4 |
181 | **/ |
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  /* Bracketed name suppresses the macro for this definition; the body
   * call expands the lock-free macro implementation. */
  return g_atomic_int_dec_and_test (atomic);
}
187 | |
188 | /** |
189 | * g_atomic_int_compare_and_exchange: |
190 | * @atomic: a pointer to a #gint or #guint |
191 | * @oldval: the value to compare with |
192 | * @newval: the value to conditionally replace with |
193 | * |
194 | * Compares @atomic to @oldval and, if equal, sets it to @newval. |
195 | * If @atomic was not equal to @oldval then no change occurs. |
196 | * |
197 | * This compare and exchange is done atomically. |
198 | * |
199 | * Think of this operation as an atomic version of |
200 | * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`. |
201 | * |
202 | * This call acts as a full compiler and hardware memory barrier. |
203 | * |
204 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
205 | * the pointer passed to it should not be `volatile`. |
206 | * |
207 | * Returns: %TRUE if the exchange took place |
208 | * |
209 | * Since: 2.4 |
210 | **/ |
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  /* Bracketed name suppresses the macro for this definition; the body
   * call expands the lock-free macro implementation. */
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}
218 | |
219 | /** |
220 | * g_atomic_int_add: |
221 | * @atomic: a pointer to a #gint or #guint |
222 | * @val: the value to add |
223 | * |
224 | * Atomically adds @val to the value of @atomic. |
225 | * |
226 | * Think of this operation as an atomic version of |
227 | * `{ tmp = *atomic; *atomic += val; return tmp; }`. |
228 | * |
229 | * This call acts as a full compiler and hardware memory barrier. |
230 | * |
231 | * Before version 2.30, this function did not return a value |
232 | * (but g_atomic_int_exchange_and_add() did, and had the same meaning). |
233 | * |
234 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
235 | * the pointer passed to it should not be `volatile`. |
236 | * |
237 | * Returns: the value of @atomic before the add, signed |
238 | * |
239 | * Since: 2.4 |
240 | **/ |
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  /* Bracketed name suppresses the macro for this definition; the body
   * call expands the lock-free macro implementation. */
  return g_atomic_int_add (atomic, val);
}
247 | |
248 | /** |
249 | * g_atomic_int_and: |
250 | * @atomic: a pointer to a #gint or #guint |
251 | * @val: the value to 'and' |
252 | * |
253 | * Performs an atomic bitwise 'and' of the value of @atomic and @val, |
254 | * storing the result back in @atomic. |
255 | * |
256 | * This call acts as a full compiler and hardware memory barrier. |
257 | * |
258 | * Think of this operation as an atomic version of |
259 | * `{ tmp = *atomic; *atomic &= val; return tmp; }`. |
260 | * |
261 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
262 | * the pointer passed to it should not be `volatile`. |
263 | * |
264 | * Returns: the value of @atomic before the operation, unsigned |
265 | * |
266 | * Since: 2.30 |
267 | **/ |
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  /* Bracketed name suppresses the macro for this definition; the body
   * call expands the lock-free macro implementation. */
  return g_atomic_int_and (atomic, val);
}
274 | |
275 | /** |
276 | * g_atomic_int_or: |
277 | * @atomic: a pointer to a #gint or #guint |
278 | * @val: the value to 'or' |
279 | * |
280 | * Performs an atomic bitwise 'or' of the value of @atomic and @val, |
281 | * storing the result back in @atomic. |
282 | * |
283 | * Think of this operation as an atomic version of |
284 | * `{ tmp = *atomic; *atomic |= val; return tmp; }`. |
285 | * |
286 | * This call acts as a full compiler and hardware memory barrier. |
287 | * |
288 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
289 | * the pointer passed to it should not be `volatile`. |
290 | * |
291 | * Returns: the value of @atomic before the operation, unsigned |
292 | * |
293 | * Since: 2.30 |
294 | **/ |
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  /* Bracketed name suppresses the macro for this definition; the body
   * call expands the lock-free macro implementation. */
  return g_atomic_int_or (atomic, val);
}
301 | |
302 | /** |
303 | * g_atomic_int_xor: |
304 | * @atomic: a pointer to a #gint or #guint |
305 | * @val: the value to 'xor' |
306 | * |
307 | * Performs an atomic bitwise 'xor' of the value of @atomic and @val, |
308 | * storing the result back in @atomic. |
309 | * |
310 | * Think of this operation as an atomic version of |
311 | * `{ tmp = *atomic; *atomic ^= val; return tmp; }`. |
312 | * |
313 | * This call acts as a full compiler and hardware memory barrier. |
314 | * |
315 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
316 | * the pointer passed to it should not be `volatile`. |
317 | * |
318 | * Returns: the value of @atomic before the operation, unsigned |
319 | * |
320 | * Since: 2.30 |
321 | **/ |
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  /* Bracketed name suppresses the macro for this definition; the body
   * call expands the lock-free macro implementation. */
  return g_atomic_int_xor (atomic, val);
}
328 | |
329 | |
330 | /** |
331 | * g_atomic_pointer_get: |
332 | * @atomic: (not nullable): a pointer to a #gpointer-sized value |
333 | * |
334 | * Gets the current value of @atomic. |
335 | * |
336 | * This call acts as a full compiler and hardware |
337 | * memory barrier (before the get). |
338 | * |
339 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
340 | * the pointer passed to it should not be `volatile`. |
341 | * |
342 | * Returns: the value of the pointer |
343 | * |
344 | * Since: 2.4 |
345 | **/ |
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  /* Bracketed name suppresses the macro for this definition.  The cast
   * gives the macro the gpointer* it expects (and drops const/volatile,
   * which the macro handles itself). */
  return g_atomic_pointer_get ((gpointer *) atomic);
}
351 | |
352 | /** |
353 | * g_atomic_pointer_set: |
354 | * @atomic: (not nullable): a pointer to a #gpointer-sized value |
355 | * @newval: a new value to store |
356 | * |
357 | * Sets the value of @atomic to @newval. |
358 | * |
359 | * This call acts as a full compiler and hardware |
360 | * memory barrier (after the set). |
361 | * |
362 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
363 | * the pointer passed to it should not be `volatile`. |
364 | * |
365 | * Since: 2.4 |
366 | **/ |
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  /* Bracketed name suppresses the macro; the cast hands the macro the
   * gpointer* it expects. */
  g_atomic_pointer_set ((gpointer *) atomic, newval);
}
373 | |
374 | /** |
375 | * g_atomic_pointer_compare_and_exchange: |
376 | * @atomic: (not nullable): a pointer to a #gpointer-sized value |
377 | * @oldval: the value to compare with |
378 | * @newval: the value to conditionally replace with |
379 | * |
380 | * Compares @atomic to @oldval and, if equal, sets it to @newval. |
381 | * If @atomic was not equal to @oldval then no change occurs. |
382 | * |
383 | * This compare and exchange is done atomically. |
384 | * |
385 | * Think of this operation as an atomic version of |
386 | * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`. |
387 | * |
388 | * This call acts as a full compiler and hardware memory barrier. |
389 | * |
390 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
391 | * the pointer passed to it should not be `volatile`. |
392 | * |
393 | * Returns: %TRUE if the exchange took place |
394 | * |
395 | * Since: 2.4 |
396 | **/ |
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  /* Bracketed name suppresses the macro; the cast hands the macro the
   * gpointer* it expects. */
  return g_atomic_pointer_compare_and_exchange ((gpointer *) atomic,
                                                oldval, newval);
}
405 | |
406 | /** |
407 | * g_atomic_pointer_add: |
408 | * @atomic: (not nullable): a pointer to a #gpointer-sized value |
409 | * @val: the value to add |
410 | * |
411 | * Atomically adds @val to the value of @atomic. |
412 | * |
413 | * Think of this operation as an atomic version of |
414 | * `{ tmp = *atomic; *atomic += val; return tmp; }`. |
415 | * |
416 | * This call acts as a full compiler and hardware memory barrier. |
417 | * |
418 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
419 | * the pointer passed to it should not be `volatile`. |
420 | * |
421 | * Returns: the value of @atomic before the add, signed |
422 | * |
423 | * Since: 2.30 |
424 | **/ |
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  /* Bracketed name suppresses the macro; the cast hands the macro the
   * gpointer* it expects. */
  return g_atomic_pointer_add ((gpointer *) atomic, val);
}
431 | |
432 | /** |
433 | * g_atomic_pointer_and: |
434 | * @atomic: (not nullable): a pointer to a #gpointer-sized value |
435 | * @val: the value to 'and' |
436 | * |
437 | * Performs an atomic bitwise 'and' of the value of @atomic and @val, |
438 | * storing the result back in @atomic. |
439 | * |
440 | * Think of this operation as an atomic version of |
441 | * `{ tmp = *atomic; *atomic &= val; return tmp; }`. |
442 | * |
443 | * This call acts as a full compiler and hardware memory barrier. |
444 | * |
445 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
446 | * the pointer passed to it should not be `volatile`. |
447 | * |
448 | * Returns: the value of @atomic before the operation, unsigned |
449 | * |
450 | * Since: 2.30 |
451 | **/ |
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  /* Bracketed name suppresses the macro; the cast hands the macro the
   * gpointer* it expects. */
  return g_atomic_pointer_and ((gpointer *) atomic, val);
}
458 | |
459 | /** |
460 | * g_atomic_pointer_or: |
461 | * @atomic: (not nullable): a pointer to a #gpointer-sized value |
462 | * @val: the value to 'or' |
463 | * |
464 | * Performs an atomic bitwise 'or' of the value of @atomic and @val, |
465 | * storing the result back in @atomic. |
466 | * |
467 | * Think of this operation as an atomic version of |
468 | * `{ tmp = *atomic; *atomic |= val; return tmp; }`. |
469 | * |
470 | * This call acts as a full compiler and hardware memory barrier. |
471 | * |
472 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
473 | * the pointer passed to it should not be `volatile`. |
474 | * |
475 | * Returns: the value of @atomic before the operation, unsigned |
476 | * |
477 | * Since: 2.30 |
478 | **/ |
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  /* Bracketed name suppresses the macro; the cast hands the macro the
   * gpointer* it expects. */
  return g_atomic_pointer_or ((gpointer *) atomic, val);
}
485 | |
486 | /** |
487 | * g_atomic_pointer_xor: |
488 | * @atomic: (not nullable): a pointer to a #gpointer-sized value |
489 | * @val: the value to 'xor' |
490 | * |
491 | * Performs an atomic bitwise 'xor' of the value of @atomic and @val, |
492 | * storing the result back in @atomic. |
493 | * |
494 | * Think of this operation as an atomic version of |
495 | * `{ tmp = *atomic; *atomic ^= val; return tmp; }`. |
496 | * |
497 | * This call acts as a full compiler and hardware memory barrier. |
498 | * |
499 | * While @atomic has a `volatile` qualifier, this is a historical artifact and |
500 | * the pointer passed to it should not be `volatile`. |
501 | * |
502 | * Returns: the value of @atomic before the operation, unsigned |
503 | * |
504 | * Since: 2.30 |
505 | **/ |
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  /* Bracketed name suppresses the macro; the cast hands the macro the
   * gpointer* it expects. */
  return g_atomic_pointer_xor ((gpointer *) atomic, val);
}
512 | |
513 | #elif defined (G_PLATFORM_WIN32) |
514 | |
515 | #include <windows.h> |
516 | #if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200) |
517 | #define InterlockedAnd _InterlockedAnd |
518 | #define InterlockedOr _InterlockedOr |
519 | #define InterlockedXor _InterlockedXor |
520 | #endif |
521 | |
522 | #if !defined (_MSC_VER) || _MSC_VER <= 1200 |
523 | #include "gmessages.h" |
524 | /* Inlined versions for older compiler */ |
/* Emulates InterlockedAnd with a compare-and-swap retry loop, for
 * compilers that do not provide it (see the #if above). */
static LONG
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    /* Attempt to publish (i & val).  InterlockedCompareExchange returns
     * the value actually observed, so i != j means another thread raced
     * us and we must retry with the fresh value. */
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  /* j is the value of *atomic before our successful exchange */
  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
/* Emulates InterlockedOr with a compare-and-swap retry loop, for
 * compilers that do not provide it (see the #if above). */
static LONG
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    /* Retry until the CAS succeeds (returned value equals the one we
     * based our update on). */
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  /* j is the value of *atomic before our successful exchange */
  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)
/* Emulates InterlockedXor with a compare-and-swap retry loop, for
 * compilers that do not provide it (see the #if above). */
static LONG
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    /* Retry until the CAS succeeds (returned value equals the one we
     * based our update on). */
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  /* j is the value of *atomic before our successful exchange */
  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
570 | #endif |
571 | |
572 | /* |
573 | * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx |
574 | */ |
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  /* full fence before the load, then a plain read of the value */
  MemoryBarrier ();
  return *atomic;
}
581 | |
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  /* plain store, then a full fence after it */
  *atomic = newval;
  MemoryBarrier ();
}
589 | |
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  /* atomic increment; the new value it returns is not needed here */
  InterlockedIncrement (atomic);
}
595 | |
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  /* InterlockedDecrement returns the resulting (decremented) value */
  return InterlockedDecrement (atomic) == 0;
}
601 | |
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  /* InterlockedCompareExchange returns the initial value of *atomic;
   * it equals oldval exactly when the exchange took place */
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}
609 | |
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  /* returns the value of *atomic before the addition, as documented */
  return InterlockedExchangeAdd (atomic, val);
}
616 | |
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  /* native InterlockedAnd, or the CAS-loop emulation #define'd above;
   * either way the prior value is returned */
  return InterlockedAnd (atomic, val);
}
623 | |
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  /* native InterlockedOr, or the CAS-loop emulation #define'd above */
  return InterlockedOr (atomic, val);
}
630 | |
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  /* native InterlockedXor, or the CAS-loop emulation #define'd above */
  return InterlockedXor (atomic, val);
}
637 | |
638 | |
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  /* view the location as a pointer slot */
  const gpointer *ptr = atomic;

  /* full fence before the load, then a plain read */
  MemoryBarrier ();
  return *ptr;
}
647 | |
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  /* view the location as a pointer slot */
  gpointer *ptr = atomic;

  /* plain store, then a full fence after it */
  *ptr = newval;
  MemoryBarrier ();
}
657 | |
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  /* InterlockedCompareExchangePointer returns the initial value; it
   * equals oldval exactly when the exchange took place */
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}
665 | |
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  /* pick the Interlocked variant matching the pointer width; both
   * return the value before the addition */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}
676 | |
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  /* pick the Interlocked variant matching the pointer width */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}
687 | |
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  /* pick the Interlocked variant matching the pointer width */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}
698 | |
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  /* pick the Interlocked variant matching the pointer width */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}
709 | #else |
710 | |
711 | /* This error occurs when `meson configure` decided that we should be capable |
712 | * of lock-free atomics but we find at compile-time that we are not. |
713 | */ |
714 | #error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics. |
715 | |
716 | #endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */ |
717 | |
718 | #else /* G_ATOMIC_LOCK_FREE */ |
719 | |
720 | /* We are not permitted to call into any GLib functions from here, so we |
721 | * can not use GMutex. |
722 | * |
723 | * Fortunately, we already take care of the Windows case above, and all |
724 | * non-Windows platforms on which glib runs have pthreads. Use those. |
725 | */ |
726 | #include <pthread.h> |
727 | |
728 | static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER; |
729 | |
730 | gint |
731 | (g_atomic_int_get) (const volatile gint *atomic) |
732 | { |
733 | gint value; |
734 | |
735 | pthread_mutex_lock (&g_atomic_lock); |
736 | value = *atomic; |
737 | pthread_mutex_unlock (&g_atomic_lock); |
738 | |
739 | return value; |
740 | } |
741 | |
742 | void |
743 | (g_atomic_int_set) (volatile gint *atomic, |
744 | gint value) |
745 | { |
746 | pthread_mutex_lock (&g_atomic_lock); |
747 | *atomic = value; |
748 | pthread_mutex_unlock (&g_atomic_lock); |
749 | } |
750 | |
751 | void |
752 | (g_atomic_int_inc) (volatile gint *atomic) |
753 | { |
754 | pthread_mutex_lock (&g_atomic_lock); |
755 | (*atomic)++; |
756 | pthread_mutex_unlock (&g_atomic_lock); |
757 | } |
758 | |
759 | gboolean |
760 | (g_atomic_int_dec_and_test) (volatile gint *atomic) |
761 | { |
762 | gboolean is_zero; |
763 | |
764 | pthread_mutex_lock (&g_atomic_lock); |
765 | is_zero = --(*atomic) == 0; |
766 | pthread_mutex_unlock (&g_atomic_lock); |
767 | |
768 | return is_zero; |
769 | } |
770 | |
771 | gboolean |
772 | (g_atomic_int_compare_and_exchange) (volatile gint *atomic, |
773 | gint oldval, |
774 | gint newval) |
775 | { |
776 | gboolean success; |
777 | |
778 | pthread_mutex_lock (&g_atomic_lock); |
779 | |
780 | if ((success = (*atomic == oldval))) |
781 | *atomic = newval; |
782 | |
783 | pthread_mutex_unlock (&g_atomic_lock); |
784 | |
785 | return success; |
786 | } |
787 | |
788 | gint |
789 | (g_atomic_int_add) (volatile gint *atomic, |
790 | gint val) |
791 | { |
792 | gint oldval; |
793 | |
794 | pthread_mutex_lock (&g_atomic_lock); |
795 | oldval = *atomic; |
796 | *atomic = oldval + val; |
797 | pthread_mutex_unlock (&g_atomic_lock); |
798 | |
799 | return oldval; |
800 | } |
801 | |
802 | guint |
803 | (g_atomic_int_and) (volatile guint *atomic, |
804 | guint val) |
805 | { |
806 | guint oldval; |
807 | |
808 | pthread_mutex_lock (&g_atomic_lock); |
809 | oldval = *atomic; |
810 | *atomic = oldval & val; |
811 | pthread_mutex_unlock (&g_atomic_lock); |
812 | |
813 | return oldval; |
814 | } |
815 | |
816 | guint |
817 | (g_atomic_int_or) (volatile guint *atomic, |
818 | guint val) |
819 | { |
820 | guint oldval; |
821 | |
822 | pthread_mutex_lock (&g_atomic_lock); |
823 | oldval = *atomic; |
824 | *atomic = oldval | val; |
825 | pthread_mutex_unlock (&g_atomic_lock); |
826 | |
827 | return oldval; |
828 | } |
829 | |
830 | guint |
831 | (g_atomic_int_xor) (volatile guint *atomic, |
832 | guint val) |
833 | { |
834 | guint oldval; |
835 | |
836 | pthread_mutex_lock (&g_atomic_lock); |
837 | oldval = *atomic; |
838 | *atomic = oldval ^ val; |
839 | pthread_mutex_unlock (&g_atomic_lock); |
840 | |
841 | return oldval; |
842 | } |
843 | |
844 | |
845 | gpointer |
846 | (g_atomic_pointer_get) (const volatile void *atomic) |
847 | { |
848 | const gpointer *ptr = atomic; |
849 | gpointer value; |
850 | |
851 | pthread_mutex_lock (&g_atomic_lock); |
852 | value = *ptr; |
853 | pthread_mutex_unlock (&g_atomic_lock); |
854 | |
855 | return value; |
856 | } |
857 | |
858 | void |
859 | (g_atomic_pointer_set) (volatile void *atomic, |
860 | gpointer newval) |
861 | { |
862 | gpointer *ptr = atomic; |
863 | |
864 | pthread_mutex_lock (&g_atomic_lock); |
865 | *ptr = newval; |
866 | pthread_mutex_unlock (&g_atomic_lock); |
867 | } |
868 | |
869 | gboolean |
870 | (g_atomic_pointer_compare_and_exchange) (volatile void *atomic, |
871 | gpointer oldval, |
872 | gpointer newval) |
873 | { |
874 | gpointer *ptr = atomic; |
875 | gboolean success; |
876 | |
877 | pthread_mutex_lock (&g_atomic_lock); |
878 | |
879 | if ((success = (*ptr == oldval))) |
880 | *ptr = newval; |
881 | |
882 | pthread_mutex_unlock (&g_atomic_lock); |
883 | |
884 | return success; |
885 | } |
886 | |
887 | gssize |
888 | (g_atomic_pointer_add) (volatile void *atomic, |
889 | gssize val) |
890 | { |
891 | gssize *ptr = atomic; |
892 | gssize oldval; |
893 | |
894 | pthread_mutex_lock (&g_atomic_lock); |
895 | oldval = *ptr; |
896 | *ptr = oldval + val; |
897 | pthread_mutex_unlock (&g_atomic_lock); |
898 | |
899 | return oldval; |
900 | } |
901 | |
902 | gsize |
903 | (g_atomic_pointer_and) (volatile void *atomic, |
904 | gsize val) |
905 | { |
906 | gsize *ptr = atomic; |
907 | gsize oldval; |
908 | |
909 | pthread_mutex_lock (&g_atomic_lock); |
910 | oldval = *ptr; |
911 | *ptr = oldval & val; |
912 | pthread_mutex_unlock (&g_atomic_lock); |
913 | |
914 | return oldval; |
915 | } |
916 | |
917 | gsize |
918 | (g_atomic_pointer_or) (volatile void *atomic, |
919 | gsize val) |
920 | { |
921 | gsize *ptr = atomic; |
922 | gsize oldval; |
923 | |
924 | pthread_mutex_lock (&g_atomic_lock); |
925 | oldval = *ptr; |
926 | *ptr = oldval | val; |
927 | pthread_mutex_unlock (&g_atomic_lock); |
928 | |
929 | return oldval; |
930 | } |
931 | |
932 | gsize |
933 | (g_atomic_pointer_xor) (volatile void *atomic, |
934 | gsize val) |
935 | { |
936 | gsize *ptr = atomic; |
937 | gsize oldval; |
938 | |
939 | pthread_mutex_lock (&g_atomic_lock); |
940 | oldval = *ptr; |
941 | *ptr = oldval ^ val; |
942 | pthread_mutex_unlock (&g_atomic_lock); |
943 | |
944 | return oldval; |
945 | } |
946 | |
947 | #endif |
948 | |
949 | /** |
950 | * g_atomic_int_exchange_and_add: |
951 | * @atomic: a pointer to a #gint |
952 | * @val: the value to add |
953 | * |
954 | * This function existed before g_atomic_int_add() returned the prior |
955 | * value of the integer (which it now does). It is retained only for |
956 | * compatibility reasons. Don't use this function in new code. |
957 | * |
958 | * Returns: the value of @atomic before the add, signed |
959 | * Since: 2.4 |
960 | * Deprecated: 2.30: Use g_atomic_int_add() instead. |
961 | **/ |
962 | gint |
963 | g_atomic_int_exchange_and_add (volatile gint *atomic, |
964 | gint val) |
965 | { |
966 | return (g_atomic_int_add) (atomic: (gint *) atomic, val); |
967 | } |
968 | |