| 1 | /* |
| 2 | * This file is part of the KDE project. |
| 3 | * |
| 4 | * SPDX-FileCopyrightText: 2010 Michael Pyne <mpyne@kde.org> |
| 5 | * SPDX-License-Identifier: LGPL-2.0-only |
| 6 | * |
| 7 | * This library includes "MurmurHash" code from Austin Appleby, which is |
| 8 | * placed in the public domain. See http://sites.google.com/site/murmurhash/ |
| 9 | */ |
| 10 | |
| 11 | #include "kcoreaddons_debug.h" |
| 12 | |
| 13 | #include "ksdcmemory_p.h" |
| 14 | |
| 15 | #include <QByteArray> |
|  | #include <algorithm> // std::sort |
|  | #include <cstring> // ::memcpy |
|  | #include <ctime> // ::time |
|  | #include <memory> // std::unique_ptr |
| 16 | |
| 17 | //----------------------------------------------------------------------------- |
| 18 | // MurmurHashAligned, by Austin Appleby |
| 19 | // (Released to the public domain, or licensed under the MIT license where |
| 20 | // software may not be released to the public domain. See |
| 21 | // http://sites.google.com/site/murmurhash/) |
| 22 | |
| 23 | // Same algorithm as MurmurHash, but only does aligned reads - should be safer |
| 24 | // on certain platforms. |
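|  | // The input is consumed as aligned 32-bit words: any misaligned prefix is |
|  | // pre-loaded byte-by-byte and later words are recombined with shifts, so no |
|  | // unaligned 32-bit loads are ever issued. |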
| 25 | static unsigned int MurmurHashAligned(const void *key, int len, unsigned int seed) |
| 26 | { |
| 27 | const unsigned int m = 0xc6a4a793; |
| 28 | const int r = 16; |
| 29 | |
| 30 | const unsigned char *data = reinterpret_cast<const unsigned char *>(key); |
| 31 | |
| 32 | unsigned int h = seed ^ (len * m); |
| 33 | |
| 34 | int align = reinterpret_cast<quintptr>(data) & 3; |
| 35 | |
| 36 | if (align && len >= 4) { |
| 37 | // Pre-load the temp registers |
| 38 | |
| 39 | unsigned int t = 0; |
| 40 | unsigned int d = 0; |
| 41 | |
| 42 | switch (align) { |
| 43 | case 1: |
| 44 | t |= data[2] << 16; |
| 45 | Q_FALLTHROUGH(); |
| 46 | case 2: |
| 47 | t |= data[1] << 8; |
| 48 | Q_FALLTHROUGH(); |
| 49 | case 3: |
| 50 | t |= data[0]; |
| 51 | } |
| 52 | |
| 53 | t <<= (8 * align); |
| 54 | |
| 55 | data += 4 - align; |
| 56 | len -= 4 - align; |
| 57 | |
| 58 | int sl = 8 * (4 - align); |
| 59 | int sr = 8 * align; |
| 60 | |
| 61 | // Mix |
| 62 | |
| 63 | while (len >= 4) { |
| 64 | d = *reinterpret_cast<const unsigned int *>(data); |
| 65 | t = (t >> sr) | (d << sl); |
| 66 | h += t; |
| 67 | h *= m; |
| 68 | h ^= h >> r; |
| 69 | t = d; |
| 70 | |
| 71 | data += 4; |
| 72 | len -= 4; |
| 73 | } |
| 74 | |
| 75 | // Handle leftover data in temp registers |
| 76 | |
| 77 | int pack = len < align ? len : align; |
| 78 | |
| 79 | d = 0; |
| 80 | |
| 81 | switch (pack) { |
| 82 | case 3: |
| 83 | d |= data[2] << 16; |
| 84 | Q_FALLTHROUGH(); |
| 85 | case 2: |
| 86 | d |= data[1] << 8; |
| 87 | Q_FALLTHROUGH(); |
| 88 | case 1: |
| 89 | d |= data[0]; |
| 90 | Q_FALLTHROUGH(); |
| 91 | case 0: |
| 92 | h += (t >> sr) | (d << sl); |
| 93 | h *= m; |
| 94 | h ^= h >> r; |
| 95 | } |
| 96 | |
| 97 | data += pack; |
| 98 | len -= pack; |
| 99 | } else { |
| 100 | while (len >= 4) { |
| 101 | h += *reinterpret_cast<const unsigned int *>(data); |
| 102 | h *= m; |
| 103 | h ^= h >> r; |
| 104 | |
| 105 | data += 4; |
| 106 | len -= 4; |
| 107 | } |
| 108 | } |
| 109 | |
| 110 | //---------- |
| 111 | // Handle tail bytes |
| 112 | |
| 113 | switch (len) { |
| 114 | case 3: |
| 115 | h += data[2] << 16; |
| 116 | Q_FALLTHROUGH(); |
| 117 | case 2: |
| 118 | h += data[1] << 8; |
| 119 | Q_FALLTHROUGH(); |
| 120 | case 1: |
| 121 | h += data[0]; |
| 122 | h *= m; |
| 123 | h ^= h >> r; |
| 124 | }; |
| 125 | |
| 126 | h *= m; |
| 127 | h ^= h >> 10; |
| 128 | h *= m; |
| 129 | h ^= h >> 17; |
| 130 | |
| 131 | return h; |
| 132 | } |
| 133 | |
| 134 | /* |
| 135 | * This is the hash function used for our data, chosen so that placing |
| 136 | * the QByteArray keys into the cache's index table is as efficient as possible. |
| 137 | */ |
| 138 | quint32 SharedMemory::generateHash(const QByteArray &buffer) |
| 139 | { |
| 140 | // The final constant is the "seed" for MurmurHash. Do *not* change it |
| 141 | // without incrementing the cache version. |
| 142 | return MurmurHashAligned(buffer.data(), buffer.size(), 0xF0F00F0F); |
| 143 | } |
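|  | // Note: the full 32-bit hash is stored in each index entry's fileNameHash |
|  | // field and compared during lookup, while the bucket position is derived |
|  | // from it modulo indexTableSize() (see fileNameHash() and findNamedEntry() |
|  | // below). |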
| 144 | |
| 145 | // Alignment concerns become a big deal when we're dealing with shared memory, |
| 146 | // since trying to access a structure sized at, say 8 bytes at an address that |
| 147 | // is not evenly divisible by 8 is a crash-inducing error on some |
| 148 | // architectures. The compiler would normally take care of this, but with |
| 149 | // shared memory the compiler will not necessarily know the alignment expected, |
| 150 | // so make sure we account for this ourselves. To do so we need a way to find |
| 151 | // out the expected alignment. Enter ALIGNOF... |
| 152 | #ifndef ALIGNOF |
| 153 | #if defined(Q_CC_GNU) || defined(Q_CC_SUN) |
| 154 | #define ALIGNOF(x) (__alignof__(x)) // GCC provides what we want directly |
| 155 | #else |
| 156 | |
| 157 | #include <stddef.h> // offsetof |
| 158 | |
| 159 | template<class T> |
| 160 | struct __alignmentHack { |
| 161 | char firstEntry; |
| 162 | T obj; |
| 163 | static const size_t size = offsetof(__alignmentHack, obj); |
| 164 | }; |
| 165 | #define ALIGNOF(x) (__alignmentHack<x>::size) |
| 166 | #endif // Non gcc |
| 167 | #endif // ALIGNOF undefined |
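|  | // For illustration: ALIGNOF(quint64) is typically 8 on 64-bit platforms, and |
|  | // the offsetof-based fallback measures the same value by checking where T |
|  | // lands after a single leading char member. |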
| 168 | |
| 169 | // Returns a pointer properly aligned to handle size alignment. |
| 170 | // size should be a power of 2. start is assumed to be the lowest |
| 171 | // permissible address, therefore the return value will be >= start. |
| 172 | template<class T> |
| 173 | T *alignTo(const void *start, uint size = ALIGNOF(T)) |
| 174 | { |
| 175 | quintptr mask = size - 1; |
| 176 | |
| 177 | // Cast to int-type to handle bit-twiddling |
| 178 | quintptr basePointer = reinterpret_cast<quintptr>(start); |
| 179 | |
| 180 | // If (and only if) we are already aligned, adding mask into basePointer |
| 181 | // will not increment any of the bits in ~mask and we get the right answer. |
| 182 | basePointer = (basePointer + mask) & ~mask; |
| 183 | |
| 184 | return reinterpret_cast<T *>(basePointer); |
| 185 | } |
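|  | // For illustration, with a 4-byte alignment requirement: |
|  | //   alignTo<quint32>(reinterpret_cast<void *>(0x1001)) yields 0x1004, while |
|  | //   an already-aligned 0x1000 is returned unchanged. |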
| 186 | |
| 187 | /* |
| 188 | * Returns a pointer to a const object of type T, assumed to be offset |
| 189 | * *BYTES* greater than the base address. Note that in order to meet alignment |
| 190 | * requirements for T, it is possible that the returned pointer points more |
| 191 | * than offset bytes into base. |
| 192 | */ |
| 193 | template<class T> |
| 194 | const T *offsetAs(const void *const base, qint32 offset) |
| 195 | { |
| 196 | const char *ptr = reinterpret_cast<const char *>(base); |
| 197 | return alignTo<const T>(ptr + offset); |
| 198 | } |
| 199 | |
| 200 | // Same as above, but for non-const objects |
| 201 | template<class T> |
| 202 | T *offsetAs(void *const base, qint32 offset) |
| 203 | { |
| 204 | char *ptr = reinterpret_cast<char *>(base); |
| 205 | return alignTo<T>(ptr + offset); |
| 206 | } |
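|  | // For illustration: if T requires 4-byte alignment and base is itself |
|  | // aligned, offsetAs<T>(base, 37) points at base + 40, the first suitably |
|  | // aligned address at or past the requested byte offset. |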
| 207 | |
| 208 | /* |
| 209 | * Returns the smallest integer greater than or equal to (a / b). |
| 210 | * @param a Numerator, should be >= 0. |
| 211 | * @param b Denominator, must be > 0. |
| 212 | */ |
| 213 | unsigned SharedMemory::intCeil(unsigned a, unsigned b) |
| 214 | { |
| 215 | // The overflow check is unsigned and so is actually defined behavior. |
| 216 | if (Q_UNLIKELY(b == 0 || ((a + b) < a))) { |
| 217 | throw KSDCCorrupted(); |
| 218 | } |
| 219 | |
| 220 | return (a + b - 1) / b; |
| 221 | } |
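|  | // For illustration: intCeil(10, 4) == 3 and intCeil(8, 4) == 2, while |
|  | // intCeil(x, 0) throws KSDCCorrupted rather than dividing by zero. |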
| 222 | |
| 223 | /* |
| 224 | * Returns number of set bits in value (see also "Hamming weight") |
| 225 | */ |
| 226 | static unsigned countSetBits(unsigned value) |
| 227 | { |
| 228 | // Uses the K&R / Wegner algorithm. GCC provides __builtin_popcount, but we |
| 229 | // expect only one bit to be set in practice, so this should be a bit |
| 230 | // faster 99.9% of the time. |
| 231 | unsigned count = 0; |
| 232 | for (count = 0; value != 0; count++) { |
| 233 | value &= (value - 1); // Clears least-significant set bit. |
| 234 | } |
| 235 | return count; |
| 236 | } |
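|  | // For illustration: countSetBits(0x2000) == 1 (a power of two, i.e. a valid |
|  | // page size), while countSetBits(0x3000) == 2 would indicate corruption. |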
| 237 | |
| 238 | /* |
| 239 | * Converts the given average item size into an appropriate page size. |
| 240 | */ |
| 241 | unsigned SharedMemory::equivalentPageSize(unsigned itemSize) |
| 242 | { |
| 243 | if (itemSize == 0) { |
| 244 | return 4096; // Default average item size. |
| 245 | } |
| 246 | |
| 247 | int log2OfSize = 0; |
| 248 | while ((itemSize >>= 1) != 0) { |
| 249 | log2OfSize++; |
| 250 | } |
| 251 | |
| 252 | // Bound page size between 512 bytes and 256 KiB. |
| 253 | // If this is adjusted, also alter validSizeMask in cachePageSize |
| 254 | log2OfSize = qBound(9, log2OfSize, 18); |
| 255 | |
| 256 | return (1 << log2OfSize); |
| 257 | } |
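|  | // For illustration: an average item size of 10000 bytes has a floored log2 of |
|  | // 13, giving 8192-byte pages; 100-byte items clamp to the 512-byte minimum |
|  | // and very large items clamp to the 256 KiB maximum. |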
| 258 | |
| 259 | // Returns pageSize in unsigned format. |
| 260 | unsigned SharedMemory::cachePageSize() const |
| 261 | { |
| 262 | unsigned _pageSize = static_cast<unsigned>(pageSize.loadRelaxed()); |
| 263 | // bits 9-18 may be set. |
| 264 | static const unsigned validSizeMask = 0x7FE00u; |
| 265 | |
| 266 | // Check for page sizes that are not a power-of-2, or are too low/high. |
| 267 | if (Q_UNLIKELY(countSetBits(_pageSize) != 1 || (_pageSize & ~validSizeMask))) { |
| 268 | throw KSDCCorrupted(); |
| 269 | } |
| 270 | |
| 271 | return _pageSize; |
| 272 | } |
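|  | // For illustration: 4096 (only bit 12 set) passes the check above, while 256 |
|  | // (bit 8, below the minimum) or 0x3000 (two bits set) would throw |
|  | // KSDCCorrupted. |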
| 273 | |
| 274 | /* |
| 275 | * This is effectively the class ctor. But since we're in shared memory, |
| 276 | * there are a few rules: |
| 277 | * |
| 278 | * 1. To allow for some form of locking in the initial-setup case, we |
| 279 | * use an atomic int, which will be initialized to 0 by mmap(). Then to |
| 280 | * take the lock we atomically increment the 0 to 1. If we end up calling |
| 281 | * the QAtomicInt constructor we can mess that up, so we can't use a |
| 282 | * constructor for this class either. |
| 283 | * 2. Any member variable you add takes up space in shared memory as well, |
| 284 | * so make sure you need it. |
| 285 | */ |
| 286 | bool SharedMemory::performInitialSetup(uint _cacheSize, uint _pageSize) |
| 287 | { |
| 288 | if (_cacheSize < MINIMUM_CACHE_SIZE) { |
| 289 | qCCritical(KCOREADDONS_DEBUG) << "Internal error: Attempted to create a cache sized < " << MINIMUM_CACHE_SIZE; |
| 290 | return false; |
| 291 | } |
| 292 | |
| 293 | if (_pageSize == 0) { |
| 294 | qCCritical(KCOREADDONS_DEBUG) << "Internal error: Attempted to create a cache with 0-sized pages."; |
| 295 | return false; |
| 296 | } |
| 297 | |
| 298 | shmLock.type = findBestSharedLock(); |
| 299 | if (shmLock.type == LOCKTYPE_INVALID) { |
| 300 | qCCritical(KCOREADDONS_DEBUG) << "Unable to find an appropriate lock to guard the shared cache. " |
| 301 | << "This *should* be essentially impossible. :(" ; |
| 302 | return false; |
| 303 | } |
| 304 | |
| 305 | bool isProcessShared = false; |
| 306 | std::unique_ptr<KSDCLock> tempLock(createLockFromId(shmLock.type, shmLock)); |
| 307 | |
| 308 | if (!tempLock->initialize(isProcessShared)) { |
| 309 | qCCritical(KCOREADDONS_DEBUG) << "Unable to initialize the lock for the cache!"; |
| 310 | return false; |
| 311 | } |
| 312 | |
| 313 | if (!isProcessShared) { |
| 314 | qCWarning(KCOREADDONS_DEBUG) << "Cache initialized, but does not support being" |
| 315 | << "shared across processes." ; |
| 316 | } |
| 317 | |
| 318 | // These must be updated to make some of our auxiliary functions |
| 319 | // work right since their values will be based on the cache size. |
| 320 | cacheSize = _cacheSize; |
| 321 | pageSize = _pageSize; |
| 322 | version = PIXMAP_CACHE_VERSION; |
| 323 | cacheTimestamp = static_cast<unsigned>(::time(nullptr)); |
| 324 | |
| 325 | clearInternalTables(); |
| 326 | |
| 327 | // Unlock the mini-lock, and introduce a total memory barrier to make |
| 328 | // sure all changes have propagated even without a mutex. |
| 329 | return ready.ref(); |
| 330 | } |
| 331 | |
| 332 | void SharedMemory::clearInternalTables() |
| 333 | { |
| 334 | // Assumes we're already locked somehow. |
| 335 | cacheAvail = pageTableSize(); |
| 336 | |
| 337 | // Setup page tables to point nowhere |
| 338 | PageTableEntry *table = pageTable(); |
| 339 | for (uint i = 0; i < pageTableSize(); ++i) { |
| 340 | table[i].index = -1; |
| 341 | } |
| 342 | |
| 343 | // Setup index tables to be accurate. |
| 344 | IndexTableEntry *indices = indexTable(); |
| 345 | for (uint i = 0; i < indexTableSize(); ++i) { |
| 346 | indices[i].firstPage = -1; |
| 347 | indices[i].useCount = 0; |
| 348 | indices[i].fileNameHash = 0; |
| 349 | indices[i].totalItemSize = 0; |
| 350 | indices[i].addTime = 0; |
| 351 | indices[i].lastUsedTime = 0; |
| 352 | } |
| 353 | } |
| 354 | |
| 355 | const IndexTableEntry *SharedMemory::indexTable() const |
| 356 | { |
| 357 | // Index Table goes immediately after this struct, at the first byte |
| 358 | // where alignment constraints are met (accounted for by offsetAs). |
| 359 | return offsetAs<IndexTableEntry>(this, sizeof(*this)); |
| 360 | } |
| 361 | |
| 362 | const PageTableEntry *SharedMemory::pageTable() const |
| 363 | { |
| 364 | const IndexTableEntry *base = indexTable(); |
| 365 | base += indexTableSize(); |
| 366 | |
| 367 | // Let's call wherever we end up the start of the page table... |
| 368 | return alignTo<PageTableEntry>(base); |
| 369 | } |
| 370 | |
| 371 | const void *SharedMemory::cachePages() const |
| 372 | { |
| 373 | const PageTableEntry *tableStart = pageTable(); |
| 374 | tableStart += pageTableSize(); |
| 375 | |
| 376 | // Let's call wherever we end up the start of the data... |
| 377 | return alignTo<void>(tableStart, cachePageSize()); |
| 378 | } |
| 379 | |
| 380 | const void *SharedMemory::page(pageID at) const |
| 381 | { |
| 382 | if (static_cast<uint>(at) >= pageTableSize()) { |
| 383 | return nullptr; |
| 384 | } |
| 385 | |
| 386 | // We must manually calculate this one since pageSize varies. |
| 387 | const char *pageStart = reinterpret_cast<const char *>(cachePages()); |
| 388 | pageStart += (at * cachePageSize()); |
| 389 | |
| 390 | return reinterpret_cast<const void *>(pageStart); |
| 391 | } |
| 392 | |
| 393 | // The following are non-const versions of some of the methods defined |
| 394 | // above. They use const_cast<> because I feel that is better than |
| 395 | // duplicating the code. I suppose template member functions (?) |
| 396 | // may work, may investigate later. |
| 397 | IndexTableEntry *SharedMemory::indexTable() |
| 398 | { |
| 399 | const SharedMemory *that = const_cast<const SharedMemory *>(this); |
| 400 | return const_cast<IndexTableEntry *>(that->indexTable()); |
| 401 | } |
| 402 | |
| 403 | PageTableEntry *SharedMemory::pageTable() |
| 404 | { |
| 405 | const SharedMemory *that = const_cast<const SharedMemory *>(this); |
| 406 | return const_cast<PageTableEntry *>(that->pageTable()); |
| 407 | } |
| 408 | |
| 409 | void *SharedMemory::cachePages() |
| 410 | { |
| 411 | const SharedMemory *that = const_cast<const SharedMemory *>(this); |
| 412 | return const_cast<void *>(that->cachePages()); |
| 413 | } |
| 414 | |
| 415 | void *SharedMemory::page(pageID at) |
| 416 | { |
| 417 | const SharedMemory *that = const_cast<const SharedMemory *>(this); |
| 418 | return const_cast<void *>(that->page(at)); |
| 419 | } |
| 420 | |
| 421 | uint SharedMemory::pageTableSize() const |
| 422 | { |
| 423 | return cacheSize / cachePageSize(); |
| 424 | } |
| 425 | |
| 426 | uint SharedMemory::indexTableSize() const |
| 427 | { |
| 428 | // Assume 2 pages on average are needed -> the number of entries |
| 429 | // would be half of the number of pages. |
| 430 | return pageTableSize() / 2; |
| 431 | } |
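|  | // For illustration: a 5 MiB cache with 4096-byte pages has 1280 page table |
|  | // entries and therefore 640 index entries. |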
| 432 | |
| 433 | /* |
| 434 | * Returns the index of the first page, for the set of contiguous |
| 435 | * pages that can hold pagesNeeded PAGES. |
| 436 | */ |
| 437 | pageID SharedMemory::findEmptyPages(uint pagesNeeded) const |
| 438 | { |
| 439 | if (Q_UNLIKELY(pagesNeeded > pageTableSize())) { |
| 440 | return pageTableSize(); |
| 441 | } |
| 442 | |
| 443 | // Loop through the page table, find the first empty page, and just |
| 444 | // make sure that there are enough free pages. |
| 445 | const PageTableEntry *table = pageTable(); |
| 446 | uint contiguousPagesFound = 0; |
| 447 | pageID base = 0; |
| 448 | for (pageID i = 0; i < static_cast<int>(pageTableSize()); ++i) { |
| 449 | if (table[i].index < 0) { |
| 450 | if (contiguousPagesFound == 0) { |
| 451 | base = i; |
| 452 | } |
| 453 | contiguousPagesFound++; |
| 454 | } else { |
| 455 | contiguousPagesFound = 0; |
| 456 | } |
| 457 | |
| 458 | if (contiguousPagesFound == pagesNeeded) { |
| 459 | return base; |
| 460 | } |
| 461 | } |
| 462 | |
| 463 | return pageTableSize(); |
| 464 | } |
| 465 | |
| 466 | // left < right? |
| 467 | bool SharedMemory::lruCompare(const IndexTableEntry &l, const IndexTableEntry &r) |
| 468 | { |
| 469 | // Ensure invalid entries migrate to the end |
| 470 | if (l.firstPage < 0 && r.firstPage >= 0) { |
| 471 | return false; |
| 472 | } |
| 473 | if (l.firstPage >= 0 && r.firstPage < 0) { |
| 474 | return true; |
| 475 | } |
| 476 | |
| 477 | // Most recently used will have the highest absolute time => |
| 478 | // least recently used (lowest) should go first => use left < right |
| 479 | return l.lastUsedTime < r.lastUsedTime; |
| 480 | } |
| 481 | |
| 482 | // left < right? |
| 483 | bool SharedMemory::seldomUsedCompare(const IndexTableEntry &l, const IndexTableEntry &r) |
| 484 | { |
| 485 | // Ensure invalid entries migrate to the end |
| 486 | if (l.firstPage < 0 && r.firstPage >= 0) { |
| 487 | return false; |
| 488 | } |
| 489 | if (l.firstPage >= 0 && r.firstPage < 0) { |
| 490 | return true; |
| 491 | } |
| 492 | |
| 493 | // Put lowest use count at start by using left < right |
| 494 | return l.useCount < r.useCount; |
| 495 | } |
| 496 | |
| 497 | // left < right? |
| 498 | bool SharedMemory::ageCompare(const IndexTableEntry &l, const IndexTableEntry &r) |
| 499 | { |
| 500 | // Ensure invalid entries migrate to the end |
| 501 | if (l.firstPage < 0 && r.firstPage >= 0) { |
| 502 | return false; |
| 503 | } |
| 504 | if (l.firstPage >= 0 && r.firstPage < 0) { |
| 505 | return true; |
| 506 | } |
| 507 | |
| 508 | // Oldest entries die first -- they have the lowest absolute add time, |
| 509 | // so just like the others use left < right |
| 510 | return l.addTime < r.addTime; |
| 511 | } |
| 512 | |
| 513 | void SharedMemory::defragment() |
| 514 | { |
| 515 | if (cacheAvail * cachePageSize() == cacheSize) { |
| 516 | return; // That was easy |
| 517 | } |
| 518 | |
| 519 | qCDebug(KCOREADDONS_DEBUG) << "Defragmenting the shared cache"; |
| 520 | |
| 521 | // Just do a linear scan, and anytime there is free space, swap it |
| 522 | // with the pages to its right. In order to meet the precondition |
| 523 | // we need to skip any used pages first. |
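|  | // For illustration, a page table of [A, -, B, B, -, C] (letters are entry |
|  | // indices, '-' is free) compacts to [A, B, B, C, -, -], with each moved |
|  | // entry's firstPage updated to its new location. |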
| 524 | |
| 525 | pageID currentPage = 0; |
| 526 | pageID idLimit = static_cast<pageID>(pageTableSize()); |
| 527 | PageTableEntry *pages = pageTable(); |
| 528 | |
| 529 | if (Q_UNLIKELY(!pages || idLimit <= 0)) { |
| 530 | throw KSDCCorrupted(); |
| 531 | } |
| 532 | |
| 533 | // Skip used pages |
| 534 | while (currentPage < idLimit && pages[currentPage].index >= 0) { |
| 535 | ++currentPage; |
| 536 | } |
| 537 | |
| 538 | pageID freeSpot = currentPage; |
| 539 | |
| 540 | // Main loop, starting from a free page, skip to the used pages and |
| 541 | // move them back. |
| 542 | while (currentPage < idLimit) { |
| 543 | // Find the next used page |
| 544 | while (currentPage < idLimit && pages[currentPage].index < 0) { |
| 545 | ++currentPage; |
| 546 | } |
| 547 | |
| 548 | if (currentPage >= idLimit) { |
| 549 | break; |
| 550 | } |
| 551 | |
| 552 | // Found an entry, move it. |
| 553 | qint32 affectedIndex = pages[currentPage].index; |
| 554 | if (Q_UNLIKELY(affectedIndex < 0 || affectedIndex >= idLimit || indexTable()[affectedIndex].firstPage != currentPage)) { |
| 555 | throw KSDCCorrupted(); |
| 556 | } |
| 557 | |
| 558 | indexTable()[affectedIndex].firstPage = freeSpot; |
| 559 | |
| 560 | // Moving one page at a time guarantees we can use memcpy safely |
| 561 | // (in other words, the source and destination will not overlap). |
| 562 | while (currentPage < idLimit && pages[currentPage].index >= 0) { |
| 563 | const void *const sourcePage = page(currentPage); |
| 564 | void *const destinationPage = page(freeSpot); |
| 565 | |
| 566 | // We always move used pages into previously-found empty spots, |
| 567 | // so check ordering as well for logic errors. |
| 568 | if (Q_UNLIKELY(!sourcePage || !destinationPage || sourcePage < destinationPage)) { |
| 569 | throw KSDCCorrupted(); |
| 570 | } |
| 571 | |
| 572 | ::memcpy(destinationPage, sourcePage, cachePageSize()); |
| 573 | pages[freeSpot].index = affectedIndex; |
| 574 | pages[currentPage].index = -1; |
| 575 | ++currentPage; |
| 576 | ++freeSpot; |
| 577 | |
| 578 | // If we've just moved the very last page and it happened to |
| 579 | // be at the very end of the cache then we're done. |
| 580 | if (currentPage >= idLimit) { |
| 581 | break; |
| 582 | } |
| 583 | |
| 584 | // We're moving consecutive used pages whether they belong to |
| 585 | // our affected entry or not, so detect if we've started moving |
| 586 | // the data for a different entry and adjust if necessary. |
| 587 | if (affectedIndex != pages[currentPage].index) { |
| 588 | indexTable()[pages[currentPage].index].firstPage = freeSpot; |
| 589 | } |
| 590 | affectedIndex = pages[currentPage].index; |
| 591 | } |
| 592 | |
| 593 | // At this point currentPage is on a page that is unused, and the |
| 594 | // cycle repeats. However, currentPage is not the first unused |
| 595 | // page, freeSpot is, so leave it alone. |
| 596 | } |
| 597 | } |
| 598 | |
| 599 | /* |
| 600 | * Finds the index entry for a given key. |
| 601 | * @param key UTF-8 encoded key to search for. |
| 602 | * @return The index of the entry in the cache named by key, or |
| 603 | * a value < 0 if no such entry is present. |
| 604 | */ |
| 605 | qint32 SharedMemory::findNamedEntry(const QByteArray &key) const |
| 606 | { |
| 607 | uint keyHash = SharedMemory::generateHash(key); |
| 608 | uint position = keyHash % indexTableSize(); |
| 609 | uint probeNumber = 1; // See insert() for description |
| 610 | |
| 611 | // Imagine 3 entries A, B, C in this logical probing chain. If B is |
| 612 | // later removed then we can't find C either. So, we must keep |
| 613 | // searching for probeNumber number of tries (or we find the item, |
| 614 | // obviously). |
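|  | // The probe offsets form the triangular numbers 1, 3, 6, 10, ... |
|  | // ((n + n*n) / 2), i.e. the same quadratic probing sequence insert() uses. |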
| 615 | while (indexTable()[position].fileNameHash != keyHash && probeNumber < MAX_PROBE_COUNT) { |
| 616 | position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2) % indexTableSize(); |
| 617 | probeNumber++; |
| 618 | } |
| 619 | |
| 620 | if (indexTable()[position].fileNameHash == keyHash) { |
| 621 | pageID firstPage = indexTable()[position].firstPage; |
| 622 | if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) { |
| 623 | return -1; |
| 624 | } |
| 625 | |
| 626 | const void *resultPage = page(firstPage); |
| 627 | if (Q_UNLIKELY(!resultPage)) { |
| 628 | throw KSDCCorrupted(); |
| 629 | } |
| 630 | |
| 631 | const char *utf8FileName = reinterpret_cast<const char *>(resultPage); |
| 632 | if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) { |
| 633 | return position; |
| 634 | } |
| 635 | } |
| 636 | |
| 637 | return -1; // Not found, or a different one found. |
| 638 | } |
| 639 | |
| 640 | // Function to use with std::unique_ptr in removeUsedPages below... |
| 641 | void SharedMemory::deleteTable(IndexTableEntry *table) |
| 642 | { |
| 643 | delete[] table; |
| 644 | } |
| 645 | |
| 646 | /* |
| 647 | * Removes the requested number of pages. |
| 648 | * |
| 649 | * @param numberNeeded the number of pages required to fulfill a current request. |
| 650 | * This number must be > 0 and <= the number of pages in the cache. |
| 651 | * @return The identifier of the beginning of a consecutive block of pages able |
| 652 | * to fill the request. Returns a value >= pageTableSize() if no such |
| 653 | * request can be filled. |
| 654 | * @internal |
| 655 | */ |
| 656 | uint SharedMemory::removeUsedPages(uint numberNeeded) |
| 657 | { |
| 658 | if (numberNeeded == 0) { |
| 659 | qCCritical(KCOREADDONS_DEBUG) << "Internal error: Asked to remove exactly 0 pages for some reason."; |
| 660 | throw KSDCCorrupted(); |
| 661 | } |
| 662 | |
| 663 | if (numberNeeded > pageTableSize()) { |
| 664 | qCCritical(KCOREADDONS_DEBUG) << "Internal error: Requested more space than exists in the cache."; |
| 665 | qCCritical(KCOREADDONS_DEBUG) << numberNeeded << "requested, " << pageTableSize() << "is the total possible."; |
| 666 | throw KSDCCorrupted(); |
| 667 | } |
| 668 | |
| 669 | // If the cache free space is large enough we will defragment first |
| 670 | // instead since it's likely we're highly fragmented. |
| 671 | // Otherwise, we will (eventually) simply remove entries per the |
| 672 | // eviction order set for the cache until there is enough room |
| 673 | // available to hold the number of pages we need. |
| 674 | |
| 675 | qCDebug(KCOREADDONS_DEBUG) << "Removing old entries to free up" << numberNeeded << "pages," << cacheAvail << "are already theoretically available."; |
| 676 | |
| 677 | if (cacheAvail > 3 * numberNeeded) { |
| 678 | defragment(); |
| 679 | uint result = findEmptyPages(numberNeeded); |
| 680 | |
| 681 | if (result < pageTableSize()) { |
| 682 | return result; |
| 683 | } else { |
| 684 | qCCritical(KCOREADDONS_DEBUG) << "Just defragmented a locked cache, but still there" |
| 685 | << "isn't enough room for the current request." ; |
| 686 | } |
| 687 | } |
| 688 | |
| 689 | // At this point we know we'll have to free some space up, so sort our |
| 690 | // list of entries by whatever the current criteria are and start |
| 691 | // killing expired entries. |
| 692 | std::unique_ptr<IndexTableEntry, decltype(deleteTable) *> tablePtr(new IndexTableEntry[indexTableSize()], deleteTable); |
| 693 | |
| 694 | if (!tablePtr) { |
| 695 | qCCritical(KCOREADDONS_DEBUG) << "Unable to allocate temporary memory for sorting the cache!"; |
| 696 | clearInternalTables(); |
| 697 | throw KSDCCorrupted(); |
| 698 | } |
| 699 | |
| 700 | // We use tablePtr to ensure the data is destroyed, but do the access |
| 701 | // via a helper pointer to allow for array ops. |
| 702 | IndexTableEntry *table = tablePtr.get(); |
| 703 | |
| 704 | ::memcpy(table, indexTable(), sizeof(IndexTableEntry) * indexTableSize()); |
| 705 | |
| 706 | // Our entry ID is simply its index into the |
| 707 | // index table, which std::sort will rearrange all willy-nilly, so first |
| 708 | // we'll save the *real* entry ID into firstPage (which is useless in |
| 709 | // our copy of the index table). On the other hand if the entry is not |
| 710 | // used then we note that with -1. |
| 711 | for (uint i = 0; i < indexTableSize(); ++i) { |
| 712 | table[i].firstPage = table[i].useCount > 0 ? static_cast<pageID>(i) : -1; |
| 713 | } |
| 714 | |
| 715 | // Declare the comparison function that we'll use to pass to std::sort, |
| 716 | // based on our cache eviction policy. |
| 717 | bool (*compareFunction)(const IndexTableEntry &, const IndexTableEntry &); |
| 718 | switch (evictionPolicy.loadRelaxed()) { |
| 719 | case KSharedDataCache::EvictLeastOftenUsed: |
| 720 | case KSharedDataCache::NoEvictionPreference: |
| 721 | default: |
| 722 | compareFunction = seldomUsedCompare; |
| 723 | break; |
| 724 | |
| 725 | case KSharedDataCache::EvictLeastRecentlyUsed: |
| 726 | compareFunction = lruCompare; |
| 727 | break; |
| 728 | |
| 729 | case KSharedDataCache::EvictOldest: |
| 730 | compareFunction = ageCompare; |
| 731 | break; |
| 732 | } |
| 733 | |
| 734 | std::sort(table, table + indexTableSize(), compareFunction); |
| 735 | |
| 736 | // Least recently used entries will be in the front. |
| 737 | // Start killing until we have room. |
| 738 | |
| 739 | // Note on removeEntry: It expects an index into the index table, |
| 740 | // but our sorted list is all jumbled. But we stored the real index |
| 741 | // in the firstPage member. |
| 742 | // Remove entries until we've removed at least the required number |
| 743 | // of pages. |
| 744 | uint i = 0; |
| 745 | while (i < indexTableSize() && numberNeeded > cacheAvail) { |
| 746 | int curIndex = table[i++].firstPage; // Really an index, not a page |
| 747 | |
| 748 | // Removed everything, still no luck (or curIndex is set but too high). |
| 749 | if (curIndex < 0 || static_cast<uint>(curIndex) >= indexTableSize()) { |
| 750 | qCCritical(KCOREADDONS_DEBUG) << "Trying to remove index" << curIndex << "out-of-bounds for index table of size" << indexTableSize(); |
| 751 | throw KSDCCorrupted(); |
| 752 | } |
| 753 | |
| 754 | qCDebug(KCOREADDONS_DEBUG) << "Removing entry of" << indexTable()[curIndex].totalItemSize << "size"; |
| 755 | removeEntry(curIndex); |
| 756 | } |
| 757 | |
| 758 | // At this point let's see if we have freed up enough data by |
| 759 | // defragmenting first and seeing if we can find that free space. |
| 760 | defragment(); |
| 761 | |
| 762 | pageID result = pageTableSize(); |
| 763 | while (i < indexTableSize() && (static_cast<uint>(result = findEmptyPages(numberNeeded))) >= pageTableSize()) { |
| 764 | int curIndex = table[i++].firstPage; |
| 765 | |
| 766 | if (curIndex < 0) { |
| 767 | // One last shot. |
| 768 | defragment(); |
| 769 | return findEmptyPages(numberNeeded); |
| 770 | } |
| 771 | |
| 772 | if (Q_UNLIKELY(static_cast<uint>(curIndex) >= indexTableSize())) { |
| 773 | throw KSDCCorrupted(); |
| 774 | } |
| 775 | |
| 776 | removeEntry(curIndex); |
| 777 | } |
| 778 | |
| 779 | // Whew. |
| 780 | return result; |
| 781 | } |
| 782 | |
| 783 | // Returns the total size required for a given cache size. |
| 784 | uint SharedMemory::totalSize(uint cacheSize, uint effectivePageSize) |
| 785 | { |
| 786 | uint numberPages = intCeil(cacheSize, effectivePageSize); |
| 787 | uint indexTableSize = numberPages / 2; |
| 788 | |
| 789 | // Knowing the number of pages, we can determine what addresses we'd be |
| 790 | // using (properly aligned), and from there determine how much memory |
| 791 | // we'd use. |
| 792 | IndexTableEntry *indexTableStart = offsetAs<IndexTableEntry>(static_cast<void *>(nullptr), sizeof(SharedMemory)); |
| 793 | |
| 794 | indexTableStart += indexTableSize; |
| 795 | |
| 796 | PageTableEntry *pageTableStart = reinterpret_cast<PageTableEntry *>(indexTableStart); |
| 797 | pageTableStart = alignTo<PageTableEntry>(pageTableStart); |
| 798 | pageTableStart += numberPages; |
| 799 | |
| 800 | // The weird part, we must manually adjust the pointer based on the page size. |
| 801 | char *cacheStart = reinterpret_cast<char *>(pageTableStart); |
| 802 | cacheStart += (numberPages * effectivePageSize); |
| 803 | |
| 804 | // ALIGNOF gives pointer alignment |
| 805 | cacheStart = alignTo<char>(cacheStart, ALIGNOF(void *)); |
| 806 | |
| 807 | // We've traversed the header, index, page table, and cache. |
| 808 | // Wherever we're at now is the size of the enchilada. |
| 809 | return static_cast<uint>(reinterpret_cast<quintptr>(cacheStart)); |
| 810 | } |
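|  | // For illustration: a 5 MiB cache with 4096-byte pages needs room for the |
|  | // SharedMemory header, 640 IndexTableEntry records, 1280 PageTableEntry |
|  | // records and 1280 * 4096 bytes of page data, each region rounded up to its |
|  | // alignment as computed above. |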
| 811 | |
| 812 | uint SharedMemory::fileNameHash(const QByteArray &utf8FileName) const |
| 813 | { |
| 814 | return generateHash(utf8FileName) % indexTableSize(); |
| 815 | } |
| 816 | |
| 817 | void SharedMemory::clear() |
| 818 | { |
| 819 | clearInternalTables(); |
| 820 | } |
| 821 | |
| 822 | // Must be called while the lock is already held! |
| 823 | void SharedMemory::removeEntry(uint index) |
| 824 | { |
| 825 | if (index >= indexTableSize() || cacheAvail > pageTableSize()) { |
| 826 | throw KSDCCorrupted(); |
| 827 | } |
| 828 | |
| 829 | PageTableEntry *pageTableEntries = pageTable(); |
| 830 | IndexTableEntry *entriesIndex = indexTable(); |
| 831 | |
| 832 | // Update page table first |
| 833 | pageID firstPage = entriesIndex[index].firstPage; |
| 834 | if (firstPage < 0 || static_cast<quint32>(firstPage) >= pageTableSize()) { |
| 835 | qCDebug(KCOREADDONS_DEBUG) << "Trying to remove an entry which is already invalid. This " |
| 836 | << "cache is likely corrupt." ; |
| 837 | throw KSDCCorrupted(); |
| 838 | } |
| 839 | |
| 840 | if (index != static_cast<uint>(pageTableEntries[firstPage].index)) { |
| 841 | qCCritical(KCOREADDONS_DEBUG) << "Removing entry" << index << "but the matching data" |
| 842 | << "doesn't link back -- cache is corrupt, clearing." ; |
| 843 | throw KSDCCorrupted(); |
| 844 | } |
| 845 | |
| 846 | uint entriesToRemove = intCeil(entriesIndex[index].totalItemSize, cachePageSize()); |
| 847 | uint savedCacheSize = cacheAvail; |
| 848 | for (uint i = firstPage; i < pageTableSize() && static_cast<uint>(pageTableEntries[i].index) == index; ++i) { |
| 849 | pageTableEntries[i].index = -1; |
| 850 | cacheAvail++; |
| 851 | } |
| 852 | |
| 853 | if ((cacheAvail - savedCacheSize) != entriesToRemove) { |
| 854 | qCCritical(KCOREADDONS_DEBUG) << "We somehow did not remove" << entriesToRemove << "when removing entry" << index << ", instead we removed" |
| 855 | << (cacheAvail - savedCacheSize); |
| 856 | throw KSDCCorrupted(); |
| 857 | } |
| 858 | |
| 859 | // For debugging |
| 860 | #ifdef NDEBUG |
| 861 | void *const startOfData = page(firstPage); |
| 862 | if (startOfData) { |
| 863 | QByteArray str((const char *)startOfData); |
| 864 | str.prepend(" REMOVED: " ); |
| 865 | str.prepend(QByteArray::number(index)); |
| 866 | str.prepend("ENTRY " ); |
| 867 | |
| 868 | ::memcpy(startOfData, str.constData(), str.size() + 1); |
| 869 | } |
| 870 | #endif |
| 871 | |
| 872 | // Update the index |
| 873 | entriesIndex[index].fileNameHash = 0; |
| 874 | entriesIndex[index].totalItemSize = 0; |
| 875 | entriesIndex[index].useCount = 0; |
| 876 | entriesIndex[index].lastUsedTime = 0; |
| 877 | entriesIndex[index].addTime = 0; |
| 878 | entriesIndex[index].firstPage = -1; |
| 879 | } |
| 880 | |