/*
 * This file is part of the KDE project.
 *
 * SPDX-FileCopyrightText: 2010 Michael Pyne <mpyne@kde.org>
 * SPDX-License-Identifier: LGPL-2.0-only
 */

#ifndef KSDCLOCK_P_H
#define KSDCLOCK_P_H

#include <qbasicatomic.h>

#include <sched.h> // sched_yield
#include <unistd.h> // Check for sched_yield

// Mac OS X, for all its POSIX compliance, does not support timeouts on its
// mutexes, which is kind of a disaster for cross-process support. However
// synchronization primitives still work, they just might hang if the cache is
// corrupted, so keep going.
#if defined(_POSIX_TIMEOUTS) && ((_POSIX_TIMEOUTS == 0) || (_POSIX_TIMEOUTS >= 200112L))
#define KSDC_TIMEOUTS_SUPPORTED 1
#endif

#if defined(__GNUC__) && !defined(KSDC_TIMEOUTS_SUPPORTED)
#warning "No support for POSIX timeouts -- application hangs are possible if the cache is corrupt"
#endif

#if defined(_POSIX_THREAD_PROCESS_SHARED) && ((_POSIX_THREAD_PROCESS_SHARED == 0) || (_POSIX_THREAD_PROCESS_SHARED >= 200112L)) && !defined(__APPLE__)
#include <pthread.h>
#define KSDC_THREAD_PROCESS_SHARED_SUPPORTED 1
#endif

#if defined(_POSIX_SEMAPHORES) && ((_POSIX_SEMAPHORES == 0) || (_POSIX_SEMAPHORES >= 200112L))
#include <semaphore.h>
#define KSDC_SEMAPHORES_SUPPORTED 1
#endif

#if defined(__GNUC__) && !defined(KSDC_SEMAPHORES_SUPPORTED) && !defined(KSDC_THREAD_PROCESS_SHARED_SUPPORTED)
#warning "No system support claimed for process-shared synchronization, KSharedDataCache will be mostly useless."
#endif

/*
 * This class defines an interface used by KSharedDataCache::Private to offload
 * proper locking and unlocking depending on what the platform supports at
 * runtime and compile-time.
 */
class KSDCLock
{
public:
    virtual ~KSDCLock()
    {
    }

    // Return value indicates if the mutex was properly initialized (including
    // threads-only as a fallback).
    virtual bool initialize(bool &processSharingSupported)
    {
        processSharingSupported = false;
        return false;
    }

    virtual bool lock()
    {
        return false;
    }

    virtual void unlock()
    {
    }
};
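
/*
 * Illustrative sketch of how a caller such as KSharedDataCache::Private might
 * drive this interface (an assumption for documentation purposes, not code
 * from this header; "shmLock" and the corruption handling are hypothetical):
 *
 *   SharedLock *shmLock = ...;   // lives inside the mapped cache
 *   KSDCLock *lock = createLockFromId(findBestSharedLock(), *shmLock);
 *
 *   bool processShared = false;
 *   if (!lock->initialize(processShared)) {
 *       // No usable primitive could be set up at all.
 *   }
 *   if (lock->lock()) {
 *       // ... access shared cache data ...
 *       lock->unlock();
 *   } else {
 *       // A failed lock() is treated as a hint that the cache is corrupt.
 *   }
 */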

/*
 * This is a very basic lock that should work on any system where GCC atomic
 * intrinsics are supported. It can waste CPU, so better primitives should be
 * used if available on the system.
 */
class simpleSpinLock : public KSDCLock
{
public:
    simpleSpinLock(QBasicAtomicInt &spinlock)
        : m_spinlock(spinlock)
    {
    }

    bool initialize(bool &processSharingSupported) override
    {
        // Clear the spinlock
        m_spinlock.storeRelaxed(0);
        processSharingSupported = true;
        return true;
    }

    bool lock() override
    {
        // Spin a few times attempting to gain the lock, as upper-level code won't
        // attempt again without assuming the cache is corrupt.
        for (unsigned i = 50; i > 0; --i) {
            if (m_spinlock.testAndSetAcquire(0, 1)) {
                return true;
            }

            // Don't steal the processor and starve the thread we're waiting
            // on.
            loopSpinPause();
        }

        return false;
    }

    void unlock() override
    {
        m_spinlock.testAndSetRelease(1, 0);
    }

private:
#ifdef Q_CC_GNU
    __attribute__((always_inline,
                   gnu_inline
#if !defined(Q_CC_INTEL) && !defined(Q_CC_CLANG)
                   ,
                   artificial
#endif
                   ))
#endif
    static inline void
    loopSpinPause()
    {
        // TODO: Spinning might be better on multi-core systems... but that means
        // figuring out how to find the number of CPUs in a cross-platform way.
#ifdef _POSIX_PRIORITY_SCHEDULING
        sched_yield();
#else
        // Sleep for the shortest possible time (nanosleep should round up).
        struct timespec wait_time = {0 /* sec */, 100 /* ns */};
        ::nanosleep(&wait_time, static_cast<struct timespec *>(0));
#endif
    }

    QBasicAtomicInt &m_spinlock;
};
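
/*
 * Usage sketch (an assumption, not code from this header): the QBasicAtomicInt
 * handed to simpleSpinLock is meant to live inside the shared cache mapping
 * (see the SharedLock union below), so that the test-and-set operations are
 * visible to every attached process. The mmap() arguments here are purely
 * illustrative.
 *
 *   void *mapping = ::mmap(nullptr, cacheSize, PROT_READ | PROT_WRITE,
 *                          MAP_SHARED, cacheFd, 0);
 *   SharedLock *shared = static_cast<SharedLock *>(mapping);
 *
 *   simpleSpinLock spin(shared->spinlock);
 *   bool processShared = false;
 *   spin.initialize(processShared); // clears the flag; always reports sharing
 */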
| 141 | |
| 142 | #ifdef KSDC_THREAD_PROCESS_SHARED_SUPPORTED |
| 143 | class pthreadLock : public KSDCLock |
| 144 | { |
| 145 | public: |
| 146 | pthreadLock(pthread_mutex_t &mutex) |
| 147 | : m_mutex(mutex) |
| 148 | { |
| 149 | } |
| 150 | |
| 151 | bool initialize(bool &processSharingSupported) override |
| 152 | { |
| 153 | // Setup process-sharing. |
| 154 | pthread_mutexattr_t mutexAttr; |
| 155 | processSharingSupported = false; |
| 156 | |
| 157 | // Initialize attributes, enable process-shared primitives, and setup |
| 158 | // the mutex. |
| 159 | if (::sysconf(_SC_THREAD_PROCESS_SHARED) >= 200112L && pthread_mutexattr_init(attr: &mutexAttr) == 0) { |
| 160 | if (pthread_mutexattr_setpshared(attr: &mutexAttr, PTHREAD_PROCESS_SHARED) == 0 && pthread_mutex_init(mutex: &m_mutex, mutexattr: &mutexAttr) == 0) { |
| 161 | processSharingSupported = true; |
| 162 | } |
| 163 | pthread_mutexattr_destroy(attr: &mutexAttr); |
| 164 | } |
| 165 | |
| 166 | // Attempt to setup for thread-only synchronization. |
| 167 | if (!processSharingSupported && pthread_mutex_init(mutex: &m_mutex, mutexattr: nullptr) != 0) { |
| 168 | return false; |
| 169 | } |
| 170 | |
| 171 | return true; |
| 172 | } |
| 173 | |
| 174 | bool lock() override |
| 175 | { |
| 176 | return pthread_mutex_lock(mutex: &m_mutex) == 0; |
| 177 | } |
| 178 | |
| 179 | void unlock() override |
| 180 | { |
| 181 | pthread_mutex_unlock(mutex: &m_mutex); |
| 182 | } |
| 183 | |
| 184 | protected: |
| 185 | pthread_mutex_t &m_mutex; |
| 186 | }; |
| 187 | #endif // KSDC_THREAD_PROCESS_SHARED_SUPPORTED |
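
/*
 * For reference, the minimal process-shared mutex pattern that initialize()
 * follows, shown standalone (a sketch with error handling trimmed; the
 * "sharedMutex" name is illustrative only and must point into shared memory):
 *
 *   pthread_mutexattr_t attr;
 *   pthread_mutexattr_init(&attr);
 *   pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *   pthread_mutex_init(sharedMutex, &attr);
 *   pthread_mutexattr_destroy(&attr);
 *
 * If PTHREAD_PROCESS_SHARED is not accepted, initialize() above falls back to
 * a default (process-private) mutex and reports processSharingSupported as
 * false.
 */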

#if defined(KSDC_THREAD_PROCESS_SHARED_SUPPORTED) && defined(KSDC_TIMEOUTS_SUPPORTED)
class pthreadTimedLock : public pthreadLock
{
public:
    pthreadTimedLock(pthread_mutex_t &mutex)
        : pthreadLock(mutex)
    {
    }

    bool lock() override
    {
        struct timespec timeout;

        // Long timeout, but if we fail to meet this timeout it's probably a cache
        // corruption (and if we take 10 seconds then it should be much, much quicker
        // the next time anyway since we'd be paged back in from disk).
        timeout.tv_sec = 10 + ::time(nullptr); // Absolute time, so 10 seconds from now
        timeout.tv_nsec = 0;

        return pthread_mutex_timedlock(&m_mutex, &timeout) == 0;
    }
};
#endif // defined(KSDC_THREAD_PROCESS_SHARED_SUPPORTED) && defined(KSDC_TIMEOUTS_SUPPORTED)

#ifdef KSDC_SEMAPHORES_SUPPORTED
class semaphoreLock : public KSDCLock
{
public:
    semaphoreLock(sem_t &semaphore)
        : m_semaphore(semaphore)
    {
    }

    bool initialize(bool &processSharingSupported) override
    {
        processSharingSupported = false;
        if (::sysconf(_SC_SEMAPHORES) < 200112L) {
            return false;
        }

        // sem_init sets up process-sharing for us.
        if (sem_init(&m_semaphore, 1, 1) == 0) {
            processSharingSupported = true;
        }
        // If that fails, try falling back to a thread-only semaphore.
        else if (sem_init(&m_semaphore, 0, 1) != 0) {
            return false;
        }

        return true;
    }

    bool lock() override
    {
        return sem_wait(&m_semaphore) == 0;
    }

    void unlock() override
    {
        sem_post(&m_semaphore);
    }

protected:
    sem_t &m_semaphore;
};
#endif // KSDC_SEMAPHORES_SUPPORTED
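
/*
 * Note on the class above (a sketch, not code from this header): a semaphore
 * initialized with a count of 1 behaves as a binary mutex, and unlike a
 * pthread mutex the process-sharing flag is an ordinary sem_init() argument.
 * "sem" here is illustrative and must point into shared memory:
 *
 *   sem_init(sem, 1, 1);  // pshared = 1, initial count = 1
 *   sem_wait(sem);        // "lock"
 *   // ... critical section ...
 *   sem_post(sem);        // "unlock"
 */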

#if defined(KSDC_SEMAPHORES_SUPPORTED) && defined(KSDC_TIMEOUTS_SUPPORTED)
class semaphoreTimedLock : public semaphoreLock
{
public:
    semaphoreTimedLock(sem_t &semaphore)
        : semaphoreLock(semaphore)
    {
    }

    bool lock() override
    {
        struct timespec timeout;

        // Long timeout, but if we fail to meet this timeout it's probably a cache
        // corruption (and if we take 10 seconds then it should be much, much quicker
        // the next time anyway since we'd be paged back in from disk).
        timeout.tv_sec = 10 + ::time(nullptr); // Absolute time, so 10 seconds from now
        timeout.tv_nsec = 0;

        return sem_timedwait(&m_semaphore, &timeout) == 0;
    }
};
#endif // defined(KSDC_SEMAPHORES_SUPPORTED) && defined(KSDC_TIMEOUTS_SUPPORTED)

// This enum controls the type of locking used for the cache, to allow for as
// much portability as possible. This value is stored in the cache and used by
// multiple processes, so treat it as a versioned field: do not re-arrange the
// values.
enum SharedLockId {
    LOCKTYPE_INVALID = 0,
    LOCKTYPE_MUTEX = 1, // pthread_mutex
    LOCKTYPE_SEMAPHORE = 2, // sem_t
    LOCKTYPE_SPINLOCK = 3, // atomic int in shared memory
};

// This type is a union of all possible lock types, with a SharedLockId used
// to choose which one is actually in use.
struct SharedLock {
    union {
#if defined(KSDC_THREAD_PROCESS_SHARED_SUPPORTED)
        pthread_mutex_t mutex;
#endif
#if defined(KSDC_SEMAPHORES_SUPPORTED)
        sem_t semaphore;
#endif
        QBasicAtomicInt spinlock;

        // It would be highly unfortunate if a simple glibc upgrade or kernel
        // addition caused this structure to change size when an existing
        // lock was thought present, so reserve enough size to cover any
        // reasonable locking structure.
        char unused[64];
    };

    SharedLockId type;
};
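
/*
 * Illustrative compile-time check (an assumption, not present in this header):
 * the 64-byte reservation is intended to out-size every primitive the union
 * can hold, which could be verified along these lines:
 *
 *   #ifdef KSDC_THREAD_PROCESS_SHARED_SUPPORTED
 *   static_assert(sizeof(pthread_mutex_t) <= 64, "SharedLock::unused too small");
 *   #endif
 *   #ifdef KSDC_SEMAPHORES_SUPPORTED
 *   static_assert(sizeof(sem_t) <= 64, "SharedLock::unused too small");
 *   #endif
 *   static_assert(sizeof(QBasicAtomicInt) <= 64, "SharedLock::unused too small");
 */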

/*
 * This function determines the best lock type to use for a shared cache,
 * based on what the local system supports. The appropriate SharedLockId is
 * returned and can be passed to createLockFromId().
 */
SharedLockId findBestSharedLock();

KSDCLock *createLockFromId(SharedLockId id, SharedLock &lock);
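
/*
 * Sketch of how these two functions plausibly fit together (an assumption
 * based on the classes above; the real definitions live in the corresponding
 * implementation file):
 *
 *   KSDCLock *createLockFromId(SharedLockId id, SharedLock &lock)
 *   {
 *       switch (id) {
 *   #ifdef KSDC_THREAD_PROCESS_SHARED_SUPPORTED
 *       case LOCKTYPE_MUTEX:
 *           return new pthreadLock(lock.mutex); // pthreadTimedLock when timeouts are supported
 *   #endif
 *   #ifdef KSDC_SEMAPHORES_SUPPORTED
 *       case LOCKTYPE_SEMAPHORE:
 *           return new semaphoreLock(lock.semaphore); // semaphoreTimedLock when available
 *   #endif
 *       case LOCKTYPE_SPINLOCK:
 *           return new simpleSpinLock(lock.spinlock);
 *       default:
 *           return nullptr;
 *       }
 *   }
 */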

#endif /* KSDCLOCK_P_H */