/*
 * This file is part of the KDE project.
 *
 * SPDX-License-Identifier: LGPL-2.0-only
 */

#ifndef KSDCMAPPING_P_H
#define KSDCMAPPING_P_H

#include "kcoreaddons_debug.h"
#include "ksdcmemory_p.h"
#include "kshareddatacache.h"

#include <config-caching.h> // HAVE_SYS_MMAN_H

#include <QFile>
#include <QtGlobal>
#include <qplatformdefs.h>

#include <sys/resource.h>

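// Per POSIX, an option macro such as _POSIX_MAPPED_FILES is either undefined,
// -1 (option not supported), 0 (support must be queried at runtime, e.g. via
// sysconf()), or >= 200112L (option supported at compile time). The two checks
// below therefore accept both 0 and the versioned value.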
#if defined(_POSIX_MAPPED_FILES) && ((_POSIX_MAPPED_FILES == 0) || (_POSIX_MAPPED_FILES >= 200112L))
#define KSDC_MAPPED_FILES_SUPPORTED 1
#endif

#if defined(_POSIX_SYNCHRONIZED_IO) && ((_POSIX_SYNCHRONIZED_IO == 0) || (_POSIX_SYNCHRONIZED_IO >= 200112L))
#define KSDC_SYNCHRONIZED_IO_SUPPORTED 1
#endif

// msync(2) requires both MAPPED_FILES and SYNCHRONIZED_IO POSIX options
#if defined(KSDC_MAPPED_FILES_SUPPORTED) && defined(KSDC_SYNCHRONIZED_IO_SUPPORTED)
#define KSDC_MSYNC_SUPPORTED
#endif

// BSD/Mac OS X compat
#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

class Q_DECL_HIDDEN KSDCMapping
{
public:
    KSDCMapping(const QFile *file, const uint size, const uint cacheSize, const uint pageSize)
        : m_mapped(nullptr)
        , m_lock()
        , m_mapSize(size)
        , m_expectedType(LOCKTYPE_INVALID)
    {
        mapSharedMemory(file, size, cacheSize, pageSize);
    }

    ~KSDCMapping()
    {
        detachFromSharedMemory(true);
    }

    bool isValid()
    {
        return !!m_mapped;
    }

    bool lock() const
    {
        if (Q_UNLIKELY(!m_mapped)) {
            return false;
        }
        if (Q_LIKELY(m_mapped->shmLock.type == m_expectedType)) {
            return m_lock->lock();
        }

        // Wrong type --> corrupt!
        throw KSDCCorrupted("Invalid cache lock type!");
    }

    void unlock() const
    {
        if (Q_LIKELY(m_lock)) {
            m_lock->unlock();
        }
    }

    // This should be called for any memory access to shared memory. This
    // function will verify that the bytes [base, base+accessLength) are
    // actually mapped to m_mapped. The cache itself may have incorrect cache
    // page sizes, incorrect cache size, etc. so this function should be called
    // despite the cache data indicating it should be safe.
    //
    // If the access is /not/ safe then a KSDCCorrupted exception will be
    // thrown, so be ready to catch that.
    void verifyProposedMemoryAccess(const void *base, unsigned accessLength) const
    {
        quintptr startOfAccess = reinterpret_cast<quintptr>(base);
        quintptr startOfShm = reinterpret_cast<quintptr>(m_mapped);

        if (Q_UNLIKELY(startOfAccess < startOfShm)) {
            throw KSDCCorrupted();
        }

        quintptr endOfShm = startOfShm + m_mapSize;
        quintptr endOfAccess = startOfAccess + accessLength;

        // Check for unsigned integer wraparound, and then
        // bounds access
        if (Q_UNLIKELY((endOfShm < startOfShm) || (endOfAccess < startOfAccess) || (endOfAccess > endOfShm))) {
            throw KSDCCorrupted();
        }
    }
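
    // A sketch of a typical call site (entryPtr and entrySize are hypothetical
    // names; the actual callers live in the KSharedDataCache implementation):
    //
    //   try {
    //       mapping.verifyProposedMemoryAccess(entryPtr, entrySize);
    //       // safe to touch [entryPtr, entryPtr + entrySize) now
    //   } catch (const KSDCCorrupted &) {
    //       // treat the cache as corrupt and detach/recreate it
    //   }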

    // Runs a quick battery of tests on an already-locked cache and returns
    // false as soon as a sanity check fails. The cache remains locked in this
    // situation.
    bool isLockedCacheSafe() const
    {
        if (Q_UNLIKELY(!m_mapped)) {
            return false;
        }

        // Note that cachePageSize() itself runs a check that can throw.
        uint testSize = SharedMemory::totalSize(m_mapped->cacheSize, m_mapped->cachePageSize());

        if (Q_UNLIKELY(m_mapSize != testSize)) {
            return false;
        }
        if (Q_UNLIKELY(m_mapped->version != SharedMemory::PIXMAP_CACHE_VERSION)) {
            return false;
        }
        switch (m_mapped->evictionPolicy.loadRelaxed()) {
        case KSharedDataCache::NoEvictionPreference: // fallthrough
        case KSharedDataCache::EvictLeastRecentlyUsed: // fallthrough
        case KSharedDataCache::EvictLeastOftenUsed: // fallthrough
        case KSharedDataCache::EvictOldest:
            break;
        default:
            return false;
        }

        return true;
    }

    SharedMemory *m_mapped;

private:
    // Put the cache in a condition to be able to call mapSharedMemory() by
    // completely detaching from shared memory (such as to respond to an
    // unrecoverable error).
    // m_mapSize must already be set to the amount of memory mapped to m_mapped.
    void detachFromSharedMemory(const bool flush = false)
    {
        // The lock holds a reference into shared memory, so this must be
        // cleared before m_mapped is removed.
        m_lock.reset();

        // Note that no other actions are required to separate from the
        // shared memory segment, simply unmapping is enough. This makes things
        // *much* easier so I'd recommend maintaining this ideal.
        if (m_mapped) {
#ifdef KSDC_MSYNC_SUPPORTED
            if (flush) {
                ::msync(m_mapped, m_mapSize, MS_INVALIDATE | MS_ASYNC);
            }
#endif
            if (0 != ::munmap(m_mapped, m_mapSize)) {
                qCCritical(KCOREADDONS_DEBUG) << "Unable to unmap shared memory segment" << static_cast<void *>(m_mapped) << ":" << ::strerror(errno);
            }
        }

        // Do not delete m_mapped, it was never constructed, it's just an alias.
        m_mapped = nullptr;
        m_mapSize = 0;
    }

    // This function does a lot of the important work, attempting to connect to shared
    // memory, a private anonymous mapping if that fails, and failing that, nothing (but
    // the cache remains "valid", we just don't actually do anything).
    void mapSharedMemory(const QFile *file, uint size, uint cacheSize, uint pageSize)
    {
        void *mapAddress = MAP_FAILED;

        if (file) {
            // Use mmap directly instead of QFile::map since the QFile (and its
            // shared mapping) will disappear unless we hang onto the QFile for no
            // reason (see the note below, we don't care about the file per se...)
            mapAddress = QT_MMAP(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, file->handle(), 0);

            // So... it is possible that someone else has mapped this cache already
            // with a larger size. If that's the case we need to at least match
            // the size to be able to access every entry, so fixup the mapping.
            if (mapAddress != MAP_FAILED) {
                // Successful mmap doesn't actually mean that whole range is readable so ensure it is
                struct rlimit memlock;
                if (getrlimit(RLIMIT_MEMLOCK, &memlock) == 0 && memlock.rlim_cur >= 2) {
                    // Half of limit in case something else has already locked some mem
                    uint lockSize = qMin(memlock.rlim_cur / 2, (rlim_t)size);
                    // Note that lockSize might be less than what we need to mmap
                    // and so this doesn't guarantee that later parts will be readable
                    // but that's fine, at least we know we will succeed here
                    if (mlock(mapAddress, lockSize)) {
                        throw KSDCCorrupted(QLatin1String("Cache is inaccessible ") + file->fileName());
                    }
                    if (munlock(mapAddress, lockSize) != 0) {
                        qCDebug(KCOREADDONS_DEBUG) << "Failed to munlock!";
                    }
                } else {
                    qCWarning(KCOREADDONS_DEBUG) << "Failed to get RLIMIT_MEMLOCK!";
                }

                SharedMemory *mapped = reinterpret_cast<SharedMemory *>(mapAddress);

                // First make sure that the version of the cache on disk is
                // valid. We also need to check that version != 0 to
                // disambiguate against an uninitialized cache.
                if (mapped->version != SharedMemory::PIXMAP_CACHE_VERSION && mapped->version > 0) {
                    detachFromSharedMemory(false);
                    throw KSDCCorrupted(QLatin1String("Wrong version of cache ") + file->fileName());
                } else if (mapped->cacheSize > cacheSize) {
                    // This order is very important. We must save the cache size
                    // before we remove the mapping, but unmap before overwriting
                    // the previous mapping size...
                    auto actualCacheSize = mapped->cacheSize;
                    auto actualPageSize = mapped->cachePageSize();
                    ::munmap(mapAddress, size);
                    size = SharedMemory::totalSize(actualCacheSize, actualPageSize);
                    mapAddress = QT_MMAP(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, file->handle(), 0);
                    if (mapAddress != MAP_FAILED) {
                        cacheSize = actualCacheSize;
                        pageSize = actualPageSize;
                    }
                }
            }
        }

        // We could be here without the mapping established if:
        // 1) Process-shared synchronization is not supported, either at compile or run time.
        // 2) Unable to open the required file.
        // 3) Unable to resize the file to be large enough.
        // 4) Establishing the mapping failed.
        // 5) The mapping succeeded, but the size was wrong and we were unable to map when
        //    we tried again.
        // 6) The incorrect version of the cache was detected.
        // 7) The file could be created, but posix_fallocate failed to commit it fully to disk.
        // In any of these cases, attempt to fall back to the
        // better-supported anonymous page style of mmap.
        // NOTE: We never use the on-disk representation independently of the
        // shared memory. If we don't get shared memory the disk info is ignored,
        // if we do get shared memory we never look at disk again.
        if (!file || mapAddress == MAP_FAILED) {
            qCWarning(KCOREADDONS_DEBUG) << "Couldn't establish file backed memory mapping, will fallback"
                                         << "to anonymous memory";
            mapAddress = QT_MMAP(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        }

        // Well now we're really hosed. We can still work, but we can't even cache
        // data.
        if (mapAddress == MAP_FAILED) {
            qCCritical(KCOREADDONS_DEBUG) << "Unable to allocate shared memory segment for shared data cache"
                                          << (file ? file->fileName() : QStringLiteral("(anonymous)")) << "of size" << m_mapSize;
            m_mapped = nullptr;
            m_mapSize = 0;
            return;
        }

        m_mapSize = size;

        // We never actually construct m_mapped, but we assign it the same address as the
        // shared memory we just mapped, so effectively m_mapped is now a SharedMemory that
        // happens to be located at mapAddress.
        m_mapped = reinterpret_cast<SharedMemory *>(mapAddress);

        // If we were first to create this memory map, all data will be 0.
        // Therefore if ready == 0 we're not initialized. A fully initialized
        // header will have ready == 2. Why?
        // Because 0 means "safe to initialize"
        // 1 means "in progress of initing"
        // 2 means "ready"
        uint usecSleepTime = 8; // Start by sleeping for 8 microseconds
        while (m_mapped->ready.loadRelaxed() != 2) {
            if (Q_UNLIKELY(usecSleepTime >= (1 << 21))) {
                // Didn't acquire within ~2 seconds? Assume an issue exists
                detachFromSharedMemory(false);
                throw KSDCCorrupted("Unable to acquire shared lock, is the cache corrupt?");
            }

            if (m_mapped->ready.testAndSetAcquire(0, 1)) {
                if (!m_mapped->performInitialSetup(cacheSize, pageSize)) {
                    qCCritical(KCOREADDONS_DEBUG) << "Unable to perform initial setup, this system probably "
                                                     "does not really support process-shared pthreads or "
                                                     "semaphores, even though it claims otherwise.";

                    detachFromSharedMemory(false);
                    return;
                }
            } else {
                usleep(usecSleepTime); // spin

                // Exponential backoff as in Ethernet and similar collision resolution methods
                usecSleepTime *= 2;
            }
        }

        m_expectedType = m_mapped->shmLock.type;
        m_lock.reset(createLockFromId(m_expectedType, m_mapped->shmLock));
        bool isProcessSharingSupported = false;

        if (!m_lock->initialize(isProcessSharingSupported)) {
            qCCritical(KCOREADDONS_DEBUG) << "Unable to setup shared cache lock, although it worked when created.";
            detachFromSharedMemory(false);
            return;
        }
    }

    std::unique_ptr<KSDCLock> m_lock;
    uint m_mapSize;
    SharedLockId m_expectedType;
};
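
// A minimal usage sketch (hypothetical caller and variable names; in practice
// this private class is driven by KSharedDataCache's implementation):
//
//   KSDCMapping mapping(&cacheFile, mapSize, cacheSize, pageSize);
//   if (mapping.isValid() && mapping.lock()) {
//       // access mapping.m_mapped, validating ranges with
//       // mapping.verifyProposedMemoryAccess() before each use
//       mapping.unlock();
//   }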

#endif /* KSDCMAPPING_P_H */