/* g_once_init_*() test
 * Copyright (C) 2007 Tim Janik
 *
 * This work is provided "as is"; redistribution and modification
 * in whole or in part, in any medium, physical or electronic is
 * permitted without restriction.
 *
 * This work is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * In no event shall the authors or contributors be liable for any
 * direct, indirect, incidental, special, exemplary, or consequential
 * damages (including, but not limited to, procurement of substitute
 * goods or services; loss of use, data, or profits; or business
 * interruption) however caused and on any theory of liability, whether
 * in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this software, even
 * if advised of the possibility of such damage.
 */
21 | #include <glib.h> |
22 | #include <stdlib.h> |
23 | |
/* Number of worker threads spawned for the concurrency tests below. */
#define N_THREADS (13)

static GMutex tmutex;             /* guards the startup hand-shake with worker threads */
static GCond tcond;               /* broadcast by main() to release waiting workers */
static int thread_call_count = 0; /* (atomic) */
static char dummy_value = 'x';    /* its address is the expected initializer2() result */
30 | |
31 | static void |
32 | assert_singleton_execution1 (void) |
33 | { |
34 | static int seen_execution = 0; /* (atomic) */ |
35 | int old_seen_execution = g_atomic_int_add (&seen_execution, 1); |
36 | if (old_seen_execution != 0) |
37 | g_error ("%s: function executed more than once" , G_STRFUNC); |
38 | } |
39 | |
40 | static void |
41 | assert_singleton_execution2 (void) |
42 | { |
43 | static int seen_execution = 0; /* (atomic) */ |
44 | int old_seen_execution = g_atomic_int_add (&seen_execution, 1); |
45 | if (old_seen_execution != 0) |
46 | g_error ("%s: function executed more than once" , G_STRFUNC); |
47 | } |
48 | |
49 | static void |
50 | assert_singleton_execution3 (void) |
51 | { |
52 | static int seen_execution = 0; /* (atomic) */ |
53 | int old_seen_execution = g_atomic_int_add (&seen_execution, 1); |
54 | if (old_seen_execution != 0) |
55 | g_error ("%s: function executed more than once" , G_STRFUNC); |
56 | } |
57 | |
58 | static void |
59 | initializer1 (void) |
60 | { |
61 | static gsize initialized = 0; |
62 | if (g_once_init_enter (&initialized)) |
63 | { |
64 | gsize initval = 42; |
65 | assert_singleton_execution1(); |
66 | g_once_init_leave (&initialized, initval); |
67 | } |
68 | } |
69 | |
70 | static gpointer |
71 | initializer2 (void) |
72 | { |
73 | static gsize initialized = 0; |
74 | if (g_once_init_enter (&initialized)) |
75 | { |
76 | void *pointer_value = &dummy_value; |
77 | assert_singleton_execution2(); |
78 | g_once_init_leave (&initialized, (gsize) pointer_value); |
79 | } |
80 | return (void*) initialized; |
81 | } |
82 | |
83 | static void |
84 | initializer3 (void) |
85 | { |
86 | static gsize initialized = 0; |
87 | if (g_once_init_enter (&initialized)) |
88 | { |
89 | gsize initval = 42; |
90 | assert_singleton_execution3(); |
91 | g_usleep (microseconds: 25 * 1000); /* waste time for multiple threads to wait */ |
92 | g_once_init_leave (&initialized, initval); |
93 | } |
94 | } |
95 | |
96 | static gpointer |
97 | tmain_call_initializer3 (gpointer user_data) |
98 | { |
99 | g_mutex_lock (mutex: &tmutex); |
100 | g_cond_wait (cond: &tcond, mutex: &tmutex); |
101 | g_mutex_unlock (mutex: &tmutex); |
102 | //g_printf ("["); |
103 | initializer3(); |
104 | //g_printf ("]\n"); |
105 | g_atomic_int_add (&thread_call_count, 1); |
106 | return NULL; |
107 | } |
108 | |
109 | static void* stress_concurrent_initializers (void*); |
110 | |
111 | int |
112 | main (int argc, |
113 | char *argv[]) |
114 | { |
115 | G_GNUC_UNUSED GThread *threads[N_THREADS]; |
116 | int i; |
117 | void *p; |
118 | |
119 | /* test simple initializer */ |
120 | initializer1(); |
121 | initializer1(); |
122 | /* test pointer initializer */ |
123 | p = initializer2(); |
124 | g_assert (p == &dummy_value); |
125 | p = initializer2(); |
126 | g_assert (p == &dummy_value); |
127 | /* start multiple threads for initializer3() */ |
128 | g_mutex_lock (mutex: &tmutex); |
129 | for (i = 0; i < N_THREADS; i++) |
130 | threads[i] = g_thread_create (func: tmain_call_initializer3, data: 0, FALSE, NULL); |
131 | g_mutex_unlock (mutex: &tmutex); |
132 | /* concurrently call initializer3() */ |
133 | g_cond_broadcast (cond: &tcond); |
134 | /* loop until all threads passed the call to initializer3() */ |
135 | while (g_atomic_int_get (&thread_call_count) < i) |
136 | { |
137 | if (rand() % 2) |
138 | g_thread_yield(); /* concurrent shuffling for single core */ |
139 | else |
140 | g_usleep (microseconds: 1000); /* concurrent shuffling for multi core */ |
141 | g_cond_broadcast (cond: &tcond); |
142 | } |
143 | /* call multiple (unoptimized) initializers from multiple threads */ |
144 | g_mutex_lock (mutex: &tmutex); |
145 | g_atomic_int_set (&thread_call_count, 0); |
146 | for (i = 0; i < N_THREADS; i++) |
147 | g_thread_create (func: stress_concurrent_initializers, data: 0, FALSE, NULL); |
148 | g_mutex_unlock (mutex: &tmutex); |
149 | while (g_atomic_int_get (&thread_call_count) < 256 * 4 * N_THREADS) |
150 | g_usleep (microseconds: 50 * 1000); /* wait for all 5 threads to complete */ |
151 | return 0; |
152 | } |
153 | |
/* get rid of g_once_init_enter-optimizations in the below definitions
 * to uncover possible races in the g_once_init_enter_impl()/
 * g_once_init_leave() implementations
 * (from here on, the raw function implementations are called directly,
 * bypassing the fast-path macro/inline wrappers)
 */
#undef g_once_init_enter
#undef g_once_init_leave
160 | |
161 | /* define 16 * 16 simple initializers */ |
162 | #define DEFINE_TEST_INITIALIZER(N) \ |
163 | static void \ |
164 | test_initializer_##N (void) \ |
165 | { \ |
166 | static gsize |
---|