/*
    This file is part of the KDE project.

    SPDX-FileCopyrightText: 2010, 2012 Michael Pyne <mpyne@kde.org>
    SPDX-FileCopyrightText: 2012 Ralf Jung <ralfjung-e@gmx.de>

    SPDX-License-Identifier: LGPL-2.0-only
*/
9
#include "kshareddatacache.h"

#include "kcoreaddons_debug.h"
#include "ksdcmapping_p.h"
#include "ksdcmemory_p.h"

#include "kshareddatacache_p.h" // Various auxiliary support code

#include <QByteArray>
#include <QDir>
#include <QFile>
#include <QRandomGenerator>
#include <QStandardPaths>

#include <cstdlib> // RAND_MAX
#include <cstring> // ::memcpy
#include <ctime> // ::time
23// The per-instance private data, such as map size, whether
24// attached or not, pointer to shared memory, etc.
25class Q_DECL_HIDDEN KSharedDataCache::Private
26{
27public:
28 Private(const QString &name, unsigned defaultCacheSize, unsigned expectedItemSize)
29 : m_cacheName(name)
30 , shm(nullptr)
31 , m_mapping(nullptr)
32 , m_defaultCacheSize(defaultCacheSize)
33 , m_expectedItemSize(expectedItemSize)
34 {
35 createMemoryMapping();
36 }
37
38 void createMemoryMapping()
39 {
40 shm = nullptr;
41 m_mapping.reset();
42
43 // 0-sized caches are fairly useless.
44 unsigned cacheSize = qMax(a: m_defaultCacheSize, b: uint(SharedMemory::MINIMUM_CACHE_SIZE));
45 unsigned pageSize = SharedMemory::equivalentPageSize(itemSize: m_expectedItemSize);
46
47 // Ensure that the cache is sized such that there is a minimum number of
48 // pages available. (i.e. a cache consisting of only 1 page is fairly
49 // useless and probably crash-prone).
50 cacheSize = qMax(a: pageSize * 256, b: cacheSize);
51
52 // The m_cacheName is used to find the file to store the cache in.
53 const QString cacheDir = QStandardPaths::writableLocation(type: QStandardPaths::GenericCacheLocation);
54 QString cacheName = cacheDir + QLatin1String("/") + m_cacheName + QLatin1String(".kcache");
55 QFile file(cacheName);
56 QFileInfo fileInfo(file);
57 if (!QDir().mkpath(dirPath: fileInfo.absolutePath())) {
58 qCWarning(KCOREADDONS_DEBUG) << "Failed to create cache dir" << fileInfo.absolutePath();
59 }
60
61 // The basic idea is to open the file that we want to map into shared
62 // memory, and then actually establish the mapping. Once we have mapped the
63 // file into shared memory we can close the file handle, the mapping will
64 // still be maintained (unless the file is resized to be shorter than
65 // expected, which we don't handle yet :-( )
66
67 // size accounts for the overhead over the desired cacheSize
68 uint size = SharedMemory::totalSize(cacheSize, effectivePageSize: pageSize);
69 Q_ASSERT(size >= cacheSize);
70
71 // Open the file and resize to some sane value if the file is too small.
72 if (file.open(flags: QIODevice::ReadWrite) && (file.size() >= size || (ensureFileAllocated(fd: file.handle(), fileSize: size) && file.resize(sz: size)))) {
73 try {
74 m_mapping.reset(p: new KSDCMapping(&file, size, cacheSize, pageSize));
75 shm = m_mapping->m_mapped;
76 } catch (KSDCCorrupted) {
77 shm = nullptr;
78 m_mapping.reset();
79
80 qCWarning(KCOREADDONS_DEBUG) << "Deleting corrupted cache" << cacheName;
81 file.remove();
82 QFile file(cacheName);
83 if (file.open(flags: QIODevice::ReadWrite) && ensureFileAllocated(fd: file.handle(), fileSize: size) && file.resize(sz: size)) {
84 try {
85 m_mapping.reset(p: new KSDCMapping(&file, size, cacheSize, pageSize));
86 } catch (KSDCCorrupted) {
87 m_mapping.reset();
88 qCCritical(KCOREADDONS_DEBUG) << "Even a brand-new cache starts off corrupted, something is"
89 << "seriously wrong. :-(";
90 }
91 }
92 }
93 }
94
95 if (!m_mapping) {
96 m_mapping.reset(p: new KSDCMapping(nullptr, size, cacheSize, pageSize));
97 shm = m_mapping->m_mapped;
98 }
99 }
100
101 // Called whenever the cache is apparently corrupt (for instance, a timeout trying to
102 // lock the cache). In this situation it is safer just to destroy it all and try again.
103 void recoverCorruptedCache()
104 {
105 qCWarning(KCOREADDONS_DEBUG) << "Deleting corrupted cache" << m_cacheName;
106
107 KSharedDataCache::deleteCache(cacheName: m_cacheName);
108
109 createMemoryMapping();
110 }
111
112 class CacheLocker
113 {
114 mutable Private *d;
115
116 bool cautiousLock()
117 {
118 int lockCount = 0;
119
120 // Locking can fail due to a timeout. If it happens too often even though
121 // we're taking corrective action assume there's some disastrous problem
122 // and give up.
123 while (!d->m_mapping->lock() && !d->m_mapping->isLockedCacheSafe()) {
124 d->recoverCorruptedCache();
125
126 if (!d->m_mapping->isValid()) {
127 qCWarning(KCOREADDONS_DEBUG) << "Lost the connection to shared memory for cache" << d->m_cacheName;
128 return false;
129 }
130
131 if (lockCount++ > 4) {
132 qCCritical(KCOREADDONS_DEBUG) << "There is a very serious problem with the KDE data cache" << d->m_cacheName
133 << "giving up trying to access cache.";
134 return false;
135 }
136 }
137
138 return true;
139 }
140
141 public:
142 CacheLocker(const Private *_d)
143 : d(const_cast<Private *>(_d))
144 {
145 if (Q_UNLIKELY(!d || !cautiousLock())) {
146 d = nullptr;
147 }
148 }
149
150 ~CacheLocker()
151 {
152 if (d) {
153 d->m_mapping->unlock();
154 }
155 }
156
157 CacheLocker(const CacheLocker &) = delete;
158 CacheLocker &operator=(const CacheLocker &) = delete;
159
160 bool failed() const
161 {
162 return !d;
163 }
164 };
165
166 QString m_cacheName;
167 SharedMemory *shm;
168 std::unique_ptr<KSDCMapping> m_mapping;
169 uint m_defaultCacheSize;
170 uint m_expectedItemSize;
171};
172
173KSharedDataCache::KSharedDataCache(const QString &cacheName, unsigned defaultCacheSize, unsigned expectedItemSize)
174 : d(nullptr)
175{
176 try {
177 d = new Private(cacheName, defaultCacheSize, expectedItemSize);
178 } catch (KSDCCorrupted) {
179 qCCritical(KCOREADDONS_DEBUG) << "Failed to initialize KSharedDataCache!";
180 d = nullptr; // Just in case
181 }
182}
183
184KSharedDataCache::~KSharedDataCache()
185{
186 if (!d) {
187 return;
188 }
189
190 delete d;
191}
192
193bool KSharedDataCache::insert(const QString &key, const QByteArray &data)
194{
195 try {
196 Private::CacheLocker lock(d);
197 if (lock.failed()) {
198 return false;
199 }
200
201 QByteArray encodedKey = key.toUtf8();
202 uint keyHash = SharedMemory::generateHash(buffer: encodedKey);
203 uint position = keyHash % d->shm->indexTableSize();
204
205 // See if we're overwriting an existing entry.
206 IndexTableEntry *indices = d->shm->indexTable();
207
208 // In order to avoid the issue of a very long-lived cache having items
209 // with a use count of 1 near-permanently, we attempt to artifically
210 // reduce the use count of long-lived items when there is high load on
211 // the cache. We do this randomly, with a weighting that makes the event
212 // impossible if load < 0.5, and guaranteed if load >= 0.96.
213 const static double startCullPoint = 0.5l;
214 const static double mustCullPoint = 0.96l;
215
216 // cacheAvail is in pages, cacheSize is in bytes.
217 double loadFactor = 1.0 - (1.0l * d->shm->cacheAvail * d->shm->cachePageSize() / d->shm->cacheSize);
218 bool cullCollisions = false;
219
220 if (Q_UNLIKELY(loadFactor >= mustCullPoint)) {
221 cullCollisions = true;
222 } else if (loadFactor > startCullPoint) {
223 const int tripWireValue = RAND_MAX * (loadFactor - startCullPoint) / (mustCullPoint - startCullPoint);
224 if (QRandomGenerator::global()->bounded(RAND_MAX) >= tripWireValue) {
225 cullCollisions = true;
226 }
227 }
228
229 // In case of collisions in the index table (i.e. identical positions), use
230 // quadratic chaining to attempt to find an empty slot. The equation we use
231 // is:
232 // position = (hash + (i + i*i) / 2) % size, where i is the probe number.
233 uint probeNumber = 1;
234 while (indices[position].useCount > 0 && probeNumber < SharedMemory::MAX_PROBE_COUNT) {
235 // If we actually stumbled upon an old version of the key we are
236 // overwriting, then use that position, do not skip over it.
237
238 if (Q_UNLIKELY(indices[position].fileNameHash == keyHash)) {
239 break;
240 }
241
242 // If we are "culling" old entries, see if this one is old and if so
243 // reduce its use count. If it reduces to zero then eliminate it and
244 // use its old spot.
245
246 if (cullCollisions && (::time(timer: nullptr) - indices[position].lastUsedTime) > 60) {
247 indices[position].useCount >>= 1;
248 if (indices[position].useCount == 0) {
249 qCDebug(KCOREADDONS_DEBUG) << "Overwriting existing old cached entry due to collision.";
250 d->shm->removeEntry(index: position); // Remove it first
251 break;
252 }
253 }
254
255 position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2) % d->shm->indexTableSize();
256 probeNumber++;
257 }
258
259 if (indices[position].useCount > 0 && indices[position].firstPage >= 0) {
260 qCDebug(KCOREADDONS_DEBUG) << "Overwriting existing cached entry due to collision.";
261 d->shm->removeEntry(index: position); // Remove it first
262 }
263
264 // Data will be stored as fileNamefoo\0PNGimagedata.....
265 // So total size required is the length of the encoded file name + 1
266 // for the trailing null, and then the length of the image data.
267 uint fileNameLength = 1 + encodedKey.length();
268 uint requiredSize = fileNameLength + data.size();
269 uint pagesNeeded = SharedMemory::intCeil(a: requiredSize, b: d->shm->cachePageSize());
270 uint firstPage(-1);
271
272 if (pagesNeeded >= d->shm->pageTableSize()) {
273 qCWarning(KCOREADDONS_DEBUG) << key << "is too large to be cached.";
274 return false;
275 }
276
277 // If the cache has no room, or the fragmentation is too great to find
278 // the required number of consecutive free pages, take action.
279 if (pagesNeeded > d->shm->cacheAvail || (firstPage = d->shm->findEmptyPages(pagesNeeded)) >= d->shm->pageTableSize()) {
280 // If we have enough free space just defragment
281 uint freePagesDesired = 3 * qMax(a: 1u, b: pagesNeeded / 2);
282
283 if (d->shm->cacheAvail > freePagesDesired) {
284 // TODO: How the hell long does this actually take on real
285 // caches?
286 d->shm->defragment();
287 firstPage = d->shm->findEmptyPages(pagesNeeded);
288 } else {
289 // If we already have free pages we don't want to remove a ton
290 // extra. However we can't rely on the return value of
291 // removeUsedPages giving us a good location since we're not
292 // passing in the actual number of pages that we need.
293 d->shm->removeUsedPages(numberNeeded: qMin(a: 2 * freePagesDesired, b: d->shm->pageTableSize()) - d->shm->cacheAvail);
294 firstPage = d->shm->findEmptyPages(pagesNeeded);
295 }
296
297 if (firstPage >= d->shm->pageTableSize() || d->shm->cacheAvail < pagesNeeded) {
298 qCCritical(KCOREADDONS_DEBUG) << "Unable to free up memory for" << key;
299 return false;
300 }
301 }
302
303 // Update page table
304 PageTableEntry *table = d->shm->pageTable();
305 for (uint i = 0; i < pagesNeeded; ++i) {
306 table[firstPage + i].index = position;
307 }
308
309 // Update index
310 indices[position].fileNameHash = keyHash;
311 indices[position].totalItemSize = requiredSize;
312 indices[position].useCount = 1;
313 indices[position].addTime = ::time(timer: nullptr);
314 indices[position].lastUsedTime = indices[position].addTime;
315 indices[position].firstPage = firstPage;
316
317 // Update cache
318 d->shm->cacheAvail -= pagesNeeded;
319
320 // Actually move the data in place
321 void *dataPage = d->shm->page(at: firstPage);
322 if (Q_UNLIKELY(!dataPage)) {
323 throw KSDCCorrupted();
324 }
325
326 // Verify it will all fit
327 d->m_mapping->verifyProposedMemoryAccess(base: dataPage, accessLength: requiredSize);
328
329 // Cast for byte-sized pointer arithmetic
330 uchar *startOfPageData = reinterpret_cast<uchar *>(dataPage);
331 ::memcpy(dest: startOfPageData, src: encodedKey.constData(), n: fileNameLength);
332 ::memcpy(dest: startOfPageData + fileNameLength, src: data.constData(), n: data.size());
333
334 return true;
335 } catch (KSDCCorrupted) {
336 d->recoverCorruptedCache();
337 return false;
338 }
339}
340
341bool KSharedDataCache::find(const QString &key, QByteArray *destination) const
342{
343 try {
344 Private::CacheLocker lock(d);
345 if (lock.failed()) {
346 return false;
347 }
348
349 // Search in the index for our data, hashed by key;
350 QByteArray encodedKey = key.toUtf8();
351 qint32 entry = d->shm->findNamedEntry(key: encodedKey);
352
353 if (entry >= 0) {
354 const IndexTableEntry *header = &d->shm->indexTable()[entry];
355 const void *resultPage = d->shm->page(at: header->firstPage);
356 if (Q_UNLIKELY(!resultPage)) {
357 throw KSDCCorrupted();
358 }
359
360 d->m_mapping->verifyProposedMemoryAccess(base: resultPage, accessLength: header->totalItemSize);
361
362 header->useCount++;
363 header->lastUsedTime = ::time(timer: nullptr);
364
365 // Our item is the key followed immediately by the data, so skip
366 // past the key.
367 const char *cacheData = reinterpret_cast<const char *>(resultPage);
368 cacheData += encodedKey.size();
369 cacheData++; // Skip trailing null -- now we're pointing to start of data
370
371 if (destination) {
372 *destination = QByteArray(cacheData, header->totalItemSize - encodedKey.size() - 1);
373 }
374
375 return true;
376 }
377 } catch (KSDCCorrupted) {
378 d->recoverCorruptedCache();
379 }
380
381 return false;
382}
383
384void KSharedDataCache::clear()
385{
386 try {
387 Private::CacheLocker lock(d);
388
389 if (!lock.failed()) {
390 d->shm->clear();
391 }
392 } catch (KSDCCorrupted) {
393 d->recoverCorruptedCache();
394 }
395}
396
397bool KSharedDataCache::contains(const QString &key) const
398{
399 try {
400 Private::CacheLocker lock(d);
401 if (lock.failed()) {
402 return false;
403 }
404
405 return d->shm->findNamedEntry(key: key.toUtf8()) >= 0;
406 } catch (KSDCCorrupted) {
407 d->recoverCorruptedCache();
408 return false;
409 }
410}
411
412void KSharedDataCache::deleteCache(const QString &cacheName)
413{
414 QString cachePath = QStandardPaths::writableLocation(type: QStandardPaths::GenericCacheLocation) + QLatin1String("/") + cacheName + QLatin1String(".kcache");
415
416 // Note that it is important to simply unlink the file, and not truncate it
417 // smaller first to avoid SIGBUS errors and similar with shared memory
418 // attached to the underlying inode.
419 qCDebug(KCOREADDONS_DEBUG) << "Removing cache at" << cachePath;
420 QFile::remove(fileName: cachePath);
421}
422
423unsigned KSharedDataCache::totalSize() const
424{
425 try {
426 Private::CacheLocker lock(d);
427 if (lock.failed()) {
428 return 0u;
429 }
430
431 return d->shm->cacheSize;
432 } catch (KSDCCorrupted) {
433 d->recoverCorruptedCache();
434 return 0u;
435 }
436}
437
438unsigned KSharedDataCache::freeSize() const
439{
440 try {
441 Private::CacheLocker lock(d);
442 if (lock.failed()) {
443 return 0u;
444 }
445
446 return d->shm->cacheAvail * d->shm->cachePageSize();
447 } catch (KSDCCorrupted) {
448 d->recoverCorruptedCache();
449 return 0u;
450 }
451}
452
453KSharedDataCache::EvictionPolicy KSharedDataCache::evictionPolicy() const
454{
455 if (d && d->shm) {
456 return static_cast<EvictionPolicy>(d->shm->evictionPolicy.fetchAndAddAcquire(valueToAdd: 0));
457 }
458
459 return NoEvictionPreference;
460}
461
462void KSharedDataCache::setEvictionPolicy(EvictionPolicy newPolicy)
463{
464 if (d && d->shm) {
465 d->shm->evictionPolicy.fetchAndStoreRelease(newValue: static_cast<int>(newPolicy));
466 }
467}
468
469unsigned KSharedDataCache::timestamp() const
470{
471 if (d && d->shm) {
472 return static_cast<unsigned>(d->shm->cacheTimestamp.fetchAndAddAcquire(valueToAdd: 0));
473 }
474
475 return 0;
476}
477
478void KSharedDataCache::setTimestamp(unsigned newTimestamp)
479{
480 if (d && d->shm) {
481 d->shm->cacheTimestamp.fetchAndStoreRelease(newValue: static_cast<int>(newTimestamp));
482 }
483}
484

// source code of kcoreaddons/src/lib/caching/kshareddatacache.cpp