#include <QtCore/QDateTime>
#include <QtCore/QSharedPointer>
#include <QtCore/QByteArray>
#include <QtCore/QFile>
#include <QtCore/QAtomicInt>
#include <QtCore/QList>

#include <sys/types.h>

    const unsigned int m = 0xc6a4a793;
    const unsigned char * data = reinterpret_cast<const unsigned char *>(key);

    unsigned int h = seed ^ (len * m);

    int align = reinterpret_cast<quintptr>(data) & 3;
    if (align && (len >= 4))
        unsigned int t = 0, d = 0;

        case 1: t |= data[2] << 16;
        case 2: t |= data[1] << 8;
        int sl = 8 * (4 - align);

            d = *reinterpret_cast<const unsigned int *>(data);
            t = (t >> sr) | (d << sl);
        int pack = len < align ? len : align;

        case 3: d |= data[2] << 16;
        case 2: d |= data[1] << 8;
        case 1: d |= data[0];
        case 0: h += (t >> sr) | (d << sl);
        h += *reinterpret_cast<const unsigned int *>(data);

    case 3: h += data[2] << 16;
    case 2: h += data[1] << 8;
    case 1: h += data[0];
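    // Note on the fragments above: the input is consumed through aligned
    // 32-bit reads even when `key` itself is misaligned. Any bytes before the
    // first aligned word are buffered in `t`, each aligned word is read into
    // `d`, and the expression (t >> sr) | (d << sl) (with sl == 8 * (4 - align)
    // and the complementary right-shift sr) stitches a complete misaligned
    // word back together before it is mixed into `h`. The trailing case labels
    // fold in the final 1-3 leftover bytes.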
#if defined(Q_CC_GNU) || defined(Q_CC_SUN)
#define ALIGNOF(x) (__alignof__ (x)) // GCC provides what we want directly
#else

template<class T>
struct __alignmentHack
{
    char firstEntry;
    T    obj;
    static const size_t size = offsetof(__alignmentHack, obj);
};
#define ALIGNOF(x) (__alignmentHack<x>::size)
#endif // ALIGNOF undefined

    quintptr mask = size - 1;

    quintptr basePointer = reinterpret_cast<quintptr>(start);

    basePointer = (basePointer + mask) & ~mask;

    return reinterpret_cast<T *>(basePointer);
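    // Example of the rounding above: aligning a pointer value of 0x1003 to a
    // 4-byte boundary gives mask == 3, and (0x1003 + 3) & ~3 == 0x1004. A
    // pointer that is already aligned (e.g. 0x1004) is left unchanged.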
    const char *ptr = reinterpret_cast<const char*>(base);
    return alignTo<const T>(ptr + offset);
    char *ptr = reinterpret_cast<char *>(base);
    return alignTo<T>(ptr + offset);
static unsigned intCeil(unsigned a, unsigned b)
{
    if (KDE_ISUNLIKELY(b == 0 || ((a + b) < a))) {
        throw KSDCCorrupted();
    }

    return (a + b - 1) / b;
}
    for (count = 0; value != 0; count++) {
        value &= (value - 1); // clears the least-significant set bit
    }
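    // Kernighan-style popcount: each pass clears the lowest set bit, so the
    // loop runs once per set bit. For value == 0b1100 it takes two passes
    // (0b1100 -> 0b1000 -> 0), giving count == 2.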
struct IndexTableEntry
{
    uint fileNameHash;
    uint totalItemSize; // in bytes
    mutable uint useCount;
    time_t addTime;
    mutable time_t lastUsedTime;
    pageID firstPage;
};

struct PageTableEntry
{
    qint32 index; // < 0 means the page is unassigned
};
    PIXMAP_CACHE_VERSION = 12,
    MINIMUM_CACHE_SIZE = 4096

    QAtomicInt evictionPolicy;

    QAtomicInt cacheTimestamp;
static unsigned equivalentPageSize(unsigned itemSize)
{
    int log2OfSize = 0;
    while ((itemSize >>= 1) != 0) {
        log2OfSize++;
    }
    // Bound the page size between 512 bytes (2^9) and 256 KiB (2^18).
    log2OfSize = qBound(9, log2OfSize, 18);
    return (1 << log2OfSize);
}
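// Example: an expected item size of 10000 bytes has 13 as the floor of its
// base-2 logarithm, so the page size becomes 1 << 13 == 8192 bytes; very small
// or very large items are clamped to the 512-byte / 256 KiB bounds by qBound.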
unsigned cachePageSize() const
{
    unsigned _pageSize = static_cast<unsigned>(pageSize);
    // A valid page size is a single power of 2 in the range allowed by
    // equivalentPageSize(), i.e. exactly one of bits 9-18 set.
    static const unsigned validSizeMask = 0x7FE00u;

    if (KDE_ISUNLIKELY(countSetBits(_pageSize) != 1 || (_pageSize & ~validSizeMask))) {
        throw KSDCCorrupted();
    }

    return _pageSize;
}
bool performInitialSetup(uint _cacheSize, uint _pageSize)
{
    if (_cacheSize < MINIMUM_CACHE_SIZE) {
        kError(ksdcArea()) << "Internal error: Attempted to create a cache sized < "
                           << MINIMUM_CACHE_SIZE;
        return false;
    }

    if (_pageSize == 0) {
        kError(ksdcArea()) << "Internal error: Attempted to create a cache with 0-sized pages.";
        return false;
    }

        kError(ksdcArea()) << "Unable to find an appropriate lock to guard the shared cache. "
                           << "This *should* be essentially impossible. :(";

    bool isProcessShared = false;

    if (!tempLock->initialize(isProcessShared)) {
        kError(ksdcArea()) << "Unable to initialize the lock for the cache!";
        return false;
    }

    if (!isProcessShared) {
        kWarning(ksdcArea()) << "Cache initialized, but does not support being"
                             << "shared across processes.";
    }

    cacheSize = _cacheSize;
    pageSize = _pageSize;
    version = PIXMAP_CACHE_VERSION;
    cacheTimestamp = static_cast<unsigned>(::time(0));

    clearInternalTables();
void clearInternalTables()
{
    // Start from an entirely empty cache: every page is available.
    cacheAvail = pageTableSize();

    // Mark all pages as unassigned.
    PageTableEntry *table = pageTable();
    for (uint i = 0; i < pageTableSize(); ++i) {
        table[i].index = -1;
    }

    // Reset every index entry.
    IndexTableEntry *indices = indexTable();
    for (uint i = 0; i < indexTableSize(); ++i) {
        indices[i].firstPage = -1;
        indices[i].useCount = 0;
        indices[i].fileNameHash = 0;
        indices[i].totalItemSize = 0;
        indices[i].addTime = 0;
        indices[i].lastUsedTime = 0;
    }
}
const IndexTableEntry *indexTable() const
{
    // The index table is positioned directly after this header structure.
    return offsetAs<IndexTableEntry>(this, sizeof(*this));
}

const PageTableEntry *pageTable() const
{
    const IndexTableEntry *base = indexTable();
    base += indexTableSize();

    return alignTo<PageTableEntry>(base);
}

const void *cachePages() const
{
    const PageTableEntry *tableStart = pageTable();
    tableStart += pageTableSize();

    return alignTo<void>(tableStart, cachePageSize());
}
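// Overall shared-memory layout implied by the accessors above (each region
// starts at the next suitably aligned address):
//
//   [SharedMemory header][IndexTableEntry[indexTableSize()]]
//   [PageTableEntry[pageTableSize()]][cache pages, each cachePageSize() bytes]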
const void *page(pageID at) const
{
    if (static_cast<uint>(at) >= pageTableSize()) {
        return 0;
    }

    const char *pageStart = reinterpret_cast<const char *>(cachePages());
    pageStart += (at * cachePageSize());

    return reinterpret_cast<const void *>(pageStart);
}
IndexTableEntry *indexTable()
{
    const SharedMemory *that = const_cast<const SharedMemory*>(this);
    return const_cast<IndexTableEntry *>(that->indexTable());
}

PageTableEntry *pageTable()
{
    const SharedMemory *that = const_cast<const SharedMemory*>(this);
    return const_cast<PageTableEntry *>(that->pageTable());
}

void *cachePages()
{
    const SharedMemory *that = const_cast<const SharedMemory*>(this);
    return const_cast<void *>(that->cachePages());
}

void *page(pageID at)
{
    const SharedMemory *that = const_cast<const SharedMemory*>(this);
    return const_cast<void *>(that->page(at));
}
uint pageTableSize() const
{
    return cacheSize / cachePageSize();
}

uint indexTableSize() const
{
    return pageTableSize() / 2;
}
pageID findEmptyPages(uint pagesNeeded) const
{
    if (KDE_ISUNLIKELY(pagesNeeded > pageTableSize())) {
        return pageTableSize();
    }

    // Scan the page table for a contiguous run of free pages.
    const PageTableEntry *table = pageTable();
    uint contiguousPagesFound = 0;
    pageID base = 0;
    for (pageID i = 0; i < static_cast<int>(pageTableSize()); ++i) {
        if (table[i].index < 0) {
            if (contiguousPagesFound == 0) {
                base = i;
            }
            contiguousPagesFound++;
        }
        else {
            contiguousPagesFound = 0;
        }

        if (contiguousPagesFound == pagesNeeded) {
            return base;
        }
    }

    return pageTableSize();
}
static bool lruCompare(const IndexTableEntry &l, const IndexTableEntry &r)
{
    // Invalid (unused) entries sort to the end.
    if (l.firstPage < 0 && r.firstPage >= 0) {
        return false;
    }
    if (l.firstPage >= 0 && r.firstPage < 0) {
        return true;
    }
    // Least recently used first.
    return l.lastUsedTime < r.lastUsedTime;
}

static bool seldomUsedCompare(const IndexTableEntry &l, const IndexTableEntry &r)
{
    if (l.firstPage < 0 && r.firstPage >= 0) {
        return false;
    }
    if (l.firstPage >= 0 && r.firstPage < 0) {
        return true;
    }
    // Least often used first.
    return l.useCount < r.useCount;
}

static bool ageCompare(const IndexTableEntry &l, const IndexTableEntry &r)
{
    if (l.firstPage < 0 && r.firstPage >= 0) {
        return false;
    }
    if (l.firstPage >= 0 && r.firstPage < 0) {
        return true;
    }
    // Oldest entries first.
    return l.addTime < r.addTime;
}
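// Each comparator drives a qSort over a copy of the index table in
// removeUsedPages() below: entries that should be evicted first end up at the
// front of the sorted copy, and invalid (already-empty) entries sink to the
// back so they are never "evicted" again.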
void defragment()
{
    if (cacheAvail * cachePageSize() == cacheSize) {
        return; // Nothing is in use, nothing to defragment.
    }

    const pageID idLimit = static_cast<pageID>(pageTableSize());
    PageTableEntry *pages = pageTable();

    if (KDE_ISUNLIKELY(!pages || idLimit <= 0)) {
        throw KSDCCorrupted();
    }

    // Skip the already-packed pages at the start to find the first hole.
    pageID currentPage = 0;
    while (currentPage < idLimit && pages[currentPage].index >= 0) {
        ++currentPage;
    }
    pageID freeSpot = currentPage;

    // Main loop: find the next used run of pages and slide it down to freeSpot.
    while (currentPage < idLimit) {
        while (currentPage < idLimit && pages[currentPage].index < 0) {
            ++currentPage;
        }

        if (currentPage >= idLimit) {
            break;
        }

        // Keep the index table in sync with the pages we are about to move.
        qint32 affectedIndex = pages[currentPage].index;
        if (KDE_ISUNLIKELY(affectedIndex < 0 ||
                           affectedIndex >= idLimit ||
                           indexTable()[affectedIndex].firstPage != currentPage))
        {
            throw KSDCCorrupted();
        }

        indexTable()[affectedIndex].firstPage = freeSpot;

        // Move the used pages one at a time; we only ever move downward.
        while (currentPage < idLimit && pages[currentPage].index >= 0) {
            const void *const sourcePage = page(currentPage);
            void *const destinationPage = page(freeSpot);

            if (KDE_ISUNLIKELY(!sourcePage || !destinationPage ||
                               sourcePage < destinationPage))
            {
                throw KSDCCorrupted();
            }

            ::memcpy(destinationPage, sourcePage, cachePageSize());
            pages[freeSpot].index = affectedIndex;
            pages[currentPage].index = -1;
            ++currentPage;
            ++freeSpot;

            if (currentPage >= idLimit) {
                break;
            }

            // If we crossed into the next entry, fix up its index entry too.
            if (affectedIndex != pages[currentPage].index) {
                indexTable()[pages[currentPage].index].firstPage = freeSpot;
            }
            affectedIndex = pages[currentPage].index;
        }
    }
}
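// Illustration of the compaction above (page table indices, -1 == free):
//   before: [A][A][-1][-1][B][B][B][-1][C]
//   after:  [A][A][B][B][B][C][-1][-1][-1]
// and each entry's firstPage in the index table is updated as its pages move.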
qint32 findNamedEntry(const QByteArray &key) const
{
    uint keyHash = generateHash(key);
    uint position = keyHash % indexTableSize();
    uint probeNumber = 1; // See insert() for a description of the probing.

    while (indexTable()[position].fileNameHash != keyHash &&
           probeNumber < MAX_PROBE_COUNT)
    {
        position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
                   % indexTableSize();
        probeNumber++;
    }

    if (indexTable()[position].fileNameHash == keyHash) {
        pageID firstPage = indexTable()[position].firstPage;
        if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) {
            return -1;
        }

        const void *resultPage = page(firstPage);
        if (KDE_ISUNLIKELY(!resultPage)) {
            throw KSDCCorrupted();
        }

        // Compare the stored key to guard against a pure hash collision.
        const char *utf8FileName = reinterpret_cast<const char *>(resultPage);
        if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) {
            return position;
        }
    }

    return -1;
}
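// The probe sequence above is triangular-number probing:
//   position_i = (keyHash + i * (i + 1) / 2) % indexTableSize()
// i.e. offsets 1, 3, 6, 10, ... from the home bucket, the same sequence that
// insert() uses below, so lookups and insertions walk identical chains.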
static void deleteTable(IndexTableEntry *table) {
    delete [] table;
}
uint removeUsedPages(uint numberNeeded)
{
    if (numberNeeded == 0) {
        kError(ksdcArea()) << "Internal error: Asked to remove exactly 0 pages for some reason.";
        throw KSDCCorrupted();
    }

    if (numberNeeded > pageTableSize()) {
        kError(ksdcArea()) << "Internal error: Requested more space than exists in the cache.";
        kError(ksdcArea()) << numberNeeded << "requested, " << pageTableSize()
                           << "is the total possible.";
        throw KSDCCorrupted();
    }

    kDebug(ksdcArea()) << "Removing old entries to free up" << numberNeeded << "pages,"
                       << cacheAvail << "are already theoretically available.";

    // If plenty of space is already free, defragmenting may be enough.
    if (cacheAvail > 3 * numberNeeded) {
        defragment();
        uint result = findEmptyPages(numberNeeded);

        if (result < pageTableSize()) {
            return result;
        }
        else {
            kError(ksdcArea()) << "Just defragmented a locked cache, but still there"
                               << "isn't enough room for the current request.";
        }
    }

    // Sort a copy of the index table by the current eviction criteria and
    // remove entries until enough pages are free.
    QSharedPointer<IndexTableEntry> tablePtr(new IndexTableEntry[indexTableSize()], deleteTable);

    if (!tablePtr) {
        kError(ksdcArea()) << "Unable to allocate temporary memory for sorting the cache!";
        clearInternalTables();
        throw KSDCCorrupted();
    }

    IndexTableEntry *table = tablePtr.data();

    ::memcpy(table, indexTable(), sizeof(IndexTableEntry) * indexTableSize());

    // qSort will shuffle the copied entries, so remember each entry's real
    // index table position in firstPage (unused entries are marked with -1).
    for (uint i = 0; i < indexTableSize(); ++i) {
        table[i].firstPage = table[i].useCount > 0 ? static_cast<pageID>(i)
                                                   : -1;
    }

    // Pick the comparison function matching the cache eviction policy.
    bool (*compareFunction)(const IndexTableEntry &, const IndexTableEntry &);
    switch ((int) evictionPolicy) {
    case (int) KSharedDataCache::EvictLeastOftenUsed:
    case (int) KSharedDataCache::NoEvictionPreference:
    default:
        compareFunction = seldomUsedCompare;
        break;

    case (int) KSharedDataCache::EvictLeastRecentlyUsed:
        compareFunction = lruCompare;
        break;

    case (int) KSharedDataCache::EvictOldest:
        compareFunction = ageCompare;
        break;
    }

    qSort(table, table + indexTableSize(), compareFunction);

    uint i = 0;

    // Remove entries in eviction order until enough pages are nominally free.
    while (i < indexTableSize() && numberNeeded > cacheAvail) {
        int curIndex = table[i++].firstPage; // Really an entry index, not a page

        if (curIndex < 0 || static_cast<uint>(curIndex) >= indexTableSize()) {
            kError(ksdcArea()) << "Trying to remove index" << curIndex
                               << "out-of-bounds for index table of size" << indexTableSize();
            throw KSDCCorrupted();
        }

        kDebug(ksdcArea()) << "Removing entry of" << indexTable()[curIndex].totalItemSize
                           << "size";
        removeEntry(curIndex);
    }

    // The freed pages may still not be contiguous; defragment and keep
    // evicting until a contiguous run of numberNeeded pages exists.
    defragment();

    pageID result = pageTableSize();
    while (i < indexTableSize() &&
           (static_cast<uint>(result = findEmptyPages(numberNeeded))) >= pageTableSize())
    {
        int curIndex = table[i++].firstPage;

        if (curIndex < 0) {
            // All valid entries are gone; one last defragment is all that's left.
            defragment();
            return findEmptyPages(numberNeeded);
        }

        if (KDE_ISUNLIKELY(static_cast<uint>(curIndex) >= indexTableSize())) {
            throw KSDCCorrupted();
        }

        removeEntry(curIndex);
    }

    return result;
}
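// Summary of the strategy above: if at least three times the needed space is
// already free, defragmenting alone should satisfy the request; otherwise
// entries are evicted in the order chosen by evictionPolicy (least often
// used, least recently used, or oldest) until a contiguous run of pages can
// be found for the caller.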
static uint totalSize(uint cacheSize, uint effectivePageSize)
{
    uint numberPages = intCeil(cacheSize, effectivePageSize);
    uint indexTableSize = numberPages / 2;

    // Walk the layout starting from a null base pointer; the final address is
    // then exactly the number of bytes the mapping must provide.
    IndexTableEntry *indexTableStart =
                offsetAs<IndexTableEntry>(static_cast<void*>(0), sizeof (SharedMemory));
    indexTableStart += indexTableSize;

    PageTableEntry *pageTableStart = reinterpret_cast<PageTableEntry *>(indexTableStart);
    pageTableStart = alignTo<PageTableEntry>(pageTableStart);
    pageTableStart += numberPages;

    char *cacheStart = reinterpret_cast<char *>(pageTableStart);
    cacheStart += (numberPages * effectivePageSize);

    cacheStart = alignTo<char>(cacheStart, ALIGNOF(void*));

    return static_cast<uint>(reinterpret_cast<quintptr>(cacheStart));
}
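// Rough example: cacheSize == 64 KiB with 4 KiB pages gives 16 pages and an
// 8-entry index table, so the mapping must hold sizeof(SharedMemory) plus
// 8 IndexTableEntry structs, 16 PageTableEntry structs, the alignment padding
// between the regions, and 16 * 4096 bytes of page data.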
uint fileNameHash(const QByteArray &utf8FileName) const

void clear()
{
    clearInternalTables();
}

void removeEntry(uint index);
class KSharedDataCache::Private
{
public:
    Private(const QString &name,
            unsigned defaultCacheSize,
            unsigned expectedItemSize)
        : m_cacheName(name)
        , m_defaultCacheSize(defaultCacheSize)
        , m_expectedItemSize(expectedItemSize)
void detachFromSharedMemory()
{
    if (shm && 0 != ::munmap(shm, m_mapSize)) {
        kError(ksdcArea()) << "Unable to unmap shared memory segment"
                           << static_cast<void*>(shm) << ":" << ::strerror(errno);
    }

    shm = 0;
    m_mapSize = 0;
}
void mapSharedMemory()
{
    // Cache sizes below the minimum are not useful.
    unsigned cacheSize = qMax(m_defaultCacheSize, uint(SharedMemory::MINIMUM_CACHE_SIZE));
    unsigned pageSize = SharedMemory::equivalentPageSize(m_expectedItemSize);

    // Ensure the cache is always at least 256 pages large.
    cacheSize = qMax(pageSize * 256, cacheSize);

    QFile file(cacheName);

    // Overall size of the mapping: header, index table, page table and pages.
    uint size = SharedMemory::totalSize(cacheSize, pageSize);
    void *mapAddress = MAP_FAILED;

    if (size < cacheSize) {
        kError(ksdcArea()) << "Asked for a cache size less than requested size somehow -- Logic Error :(";
        return;
    }

    if (file.open(QIODevice::ReadWrite) &&
        (file.size() >= size ||
         ensureFileAllocated(file.handle(), size)))
    {
        mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
    }

    if (mapAddress != MAP_FAILED) {
        SharedMemory *mapped = reinterpret_cast<SharedMemory *>(mapAddress);

        // Sanity-check any cache that already existed on disk.
        if (mapped->version != SharedMemory::PIXMAP_CACHE_VERSION &&
            mapped->version > 0)
        {
            recoverCorruptedCache();
        }
        else if (mapped->cacheSize > cacheSize) {
            // The existing cache is larger than requested: adopt its size and
            // re-map so that the entire cache is visible to this process.
            cacheSize = mapped->cacheSize;
            unsigned actualPageSize = mapped->cachePageSize();
            ::munmap(mapAddress, size);
            size = SharedMemory::totalSize(cacheSize, actualPageSize);
            mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
        }
    }

    // If no shared mapping could be established, fall back to process-private memory.
    if (mapAddress == MAP_FAILED) {
        kWarning(ksdcArea()) << "Failed to establish shared memory mapping, will fallback"
                             << "to private memory -- memory usage will increase";

        mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    if (mapAddress == MAP_FAILED) {
        kError(ksdcArea()) << "Unable to allocate shared memory segment for shared data cache"
                           << cacheName << "of size" << cacheSize;
        return;
    }

    shm = reinterpret_cast<SharedMemory *>(mapAddress);

    // Wait for the cache to be initialized; exactly one attaching process wins
    // the testAndSet below and runs performInitialSetup(), the rest spin with
    // exponential backoff until ready becomes 2.
    uint usecSleepTime = 8; // Start by sleeping for 8 microseconds
    while (shm->ready != 2) {
        if (KDE_ISUNLIKELY(usecSleepTime >= (1 << 21))) {
            // Didn't acquire within ~8 seconds? Assume the cache is corrupt.
            kError(ksdcArea()) << "Unable to acquire shared lock, is the cache corrupt?";

            detachFromSharedMemory();
            return;
        }

        if (shm->ready.testAndSetAcquire(0, 1)) {
            if (!shm->performInitialSetup(cacheSize, pageSize)) {
                kError(ksdcArea()) << "Unable to perform initial setup, this system probably "
                                      "does not really support process-shared pthreads or "
                                      "semaphores, even though it claims otherwise.";

                detachFromSharedMemory();
                return;
            }
        }
        else {
            usleep(usecSleepTime); // spin
            usecSleepTime *= 2;    // exponential backoff
        }
    }

    m_expectedType = shm->shmLock.type;
    m_lock = QSharedPointer<KSDCLock>(createLockFromId(m_expectedType, shm->shmLock));
    bool isProcessSharingSupported = false;

    if (!m_lock->initialize(isProcessSharingSupported)) {
        kError(ksdcArea()) << "Unable to setup shared cache lock, although it worked when created.";
        detachFromSharedMemory();
    }
}
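// The mapping above degrades gracefully: a shared file-backed mapping is
// preferred, an anonymous private mapping is the fallback (cache contents are
// then visible to this process only), and the ready flag plus exponential
// backoff lets exactly one attaching process perform the initial setup while
// the others wait.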
void recoverCorruptedCache()
{
    detachFromSharedMemory();
void verifyProposedMemoryAccess(const void *base, unsigned accessLength) const
{
    quintptr startOfAccess = reinterpret_cast<quintptr>(base);
    quintptr startOfShm = reinterpret_cast<quintptr>(shm);

    if (KDE_ISUNLIKELY(startOfAccess < startOfShm)) {
        throw KSDCCorrupted();
    }

    quintptr endOfShm = startOfShm + m_mapSize;
    quintptr endOfAccess = startOfAccess + accessLength;

    // Check for unsigned integer wraparound, and then the actual bounds.
    if (KDE_ISUNLIKELY((endOfShm < startOfShm) ||
                       (endOfAccess < startOfAccess) ||
                       (endOfAccess > endOfShm)))
    {
        throw KSDCCorrupted();
    }
}
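// Every read or write into an entry's pages goes through this check first
// (see insert() and find() below), so even a corrupted index entry that
// claims a huge totalItemSize cannot cause access past the end of the mapping.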
    if (KDE_ISLIKELY(shm && shm->shmLock.type == m_expectedType)) {
        return m_lock->lock();
    }

    // No shared memory, or the wrong lock type --> corrupt.
    throw KSDCCorrupted();
    mutable Private * d;

    bool cautiousLock()
    {
        int lockCount = 0;
        // Locking can fail due to a timeout; retry after recovery, then give up.
        while (!d->lock() && !isLockedCacheSafe()) {
            d->recoverCorruptedCache();
            if (lockCount++ > 4) {
                kError(ksdcArea()) << "There is a very serious problem with the KDE data cache"
                                   << d->m_cacheName << "giving up trying to access cache.";
                d->detachFromSharedMemory();
                return false;
            }
        }
        return true;
    }
    bool isLockedCacheSafe() const
    {
        uint testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());

        if (KDE_ISUNLIKELY(d->m_mapSize != testSize)) {
            return false;
        }
        if (KDE_ISUNLIKELY(d->shm->version != SharedMemory::PIXMAP_CACHE_VERSION)) {
            return false;
        }
        switch (d->shm->evictionPolicy) {
    CacheLocker(const Private *_d) : d(const_cast<Private *>(_d))
    {
        if (KDE_ISUNLIKELY(!d || !d->shm || !cautiousLock())) {
            d = 0;
        }
    }

    bool failed() const { return !d || d->shm == 0; }
    QSharedPointer<KSDCLock> m_lock;
    uint m_defaultCacheSize;
    uint m_expectedItemSize;
void SharedMemory::removeEntry(uint index)
{
    if (index >= indexTableSize() || cacheAvail > pageTableSize()) {
        throw KSDCCorrupted();
    }

    PageTableEntry *pageTableEntries = pageTable();
    IndexTableEntry *entriesIndex = indexTable();

    // Update the page table first.
    pageID firstPage = entriesIndex[index].firstPage;
    if (firstPage < 0 || static_cast<quint32>(firstPage) >= pageTableSize()) {
        kDebug(ksdcArea()) << "Trying to remove an entry which is already invalid. This "
                           << "cache is likely corrupt.";
        throw KSDCCorrupted();
    }

    if (index != static_cast<uint>(pageTableEntries[firstPage].index)) {
        kError(ksdcArea()) << "Removing entry" << index << "but the matching data"
                           << "doesn't link back -- cache is corrupt, clearing.";
        throw KSDCCorrupted();
    }

    uint entriesToRemove = intCeil(entriesIndex[index].totalItemSize, cachePageSize());
    uint savedCacheSize = cacheAvail;
    for (uint i = firstPage; i < pageTableSize() &&
         (uint) pageTableEntries[i].index == index; ++i)
    {
        pageTableEntries[i].index = -1;
        cacheAvail++;
    }

    if ((cacheAvail - savedCacheSize) != entriesToRemove) {
        kError(ksdcArea()) << "We somehow did not remove" << entriesToRemove
                           << "when removing entry" << index << ", instead we removed"
                           << (cacheAvail - savedCacheSize);
        throw KSDCCorrupted();
    }

    // For debugging, stamp a marker over the removed entry's key.
    void *const startOfData = page(firstPage);
    if (startOfData) {
        QByteArray str((const char *) startOfData);
        str.prepend(" REMOVED: ");
        str.prepend(QByteArray::number(index));
        str.prepend("ENTRY ");

        ::memcpy(startOfData, str.constData(), str.size() + 1);
    }

    // Reset the index entry.
    entriesIndex[index].fileNameHash = 0;
    entriesIndex[index].totalItemSize = 0;
    entriesIndex[index].useCount = 0;
    entriesIndex[index].lastUsedTime = 0;
    entriesIndex[index].addTime = 0;
    entriesIndex[index].firstPage = -1;
}
KSharedDataCache::KSharedDataCache(const QString &cacheName,
                                   unsigned defaultCacheSize,
                                   unsigned expectedItemSize)
{
    try {
        d = new Private(cacheName, defaultCacheSize, expectedItemSize);
    }
    catch(KSDCCorrupted) {
        KSharedDataCache::deleteCache(cacheName);

        // Try only once more.
        try {
            d = new Private(cacheName, defaultCacheSize, expectedItemSize);
        }
        catch(KSDCCorrupted) {
            kError(ksdcArea())
                << "Even a brand-new cache starts off corrupted, something is"
                << "seriously wrong. :-(";
            d = 0;
        }
    }
}
#ifdef KSDC_MSYNC_SUPPORTED
        ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
#endif
        ::munmap(d->shm, d->m_mapSize);
bool KSharedDataCache::insert(const QString &key, const QByteArray &data)
{
    try {
        Private::CacheLocker lock(d);
        if (lock.failed()) {
            return false;
        }

        QByteArray encodedKey = key.toUtf8();
        uint keyHash = generateHash(encodedKey);
        uint position = keyHash % d->shm->indexTableSize();

        IndexTableEntry *indices = d->shm->indexTable();

        // As the cache fills up, start culling colliding entries: below
        // startCullPoint nothing is culled, at or above mustCullPoint
        // colliding entries are always culled.
        const static double startCullPoint = 0.5l;
        const static double mustCullPoint = 0.96l;

        // cacheAvail is in pages, cacheSize is in bytes.
        double loadFactor = 1.0 - (1.0l * d->shm->cacheAvail * d->shm->cachePageSize()
                                   / d->shm->cacheSize);
        bool cullCollisions = false;

        if (KDE_ISUNLIKELY(loadFactor >= mustCullPoint)) {
            cullCollisions = true;
        }
        else if (loadFactor > startCullPoint) {
            const int tripWireValue = RAND_MAX * (loadFactor - startCullPoint) / (mustCullPoint - startCullPoint);
                cullCollisions = true;
        }

        // Find an index slot for the key, using the same triangular probing
        // as findNamedEntry().
        uint probeNumber = 1;
        while (indices[position].useCount > 0 && probeNumber < MAX_PROBE_COUNT) {
            // An old version of this key: reuse its slot.
            if (KDE_ISUNLIKELY(indices[position].fileNameHash == keyHash)) {
                break;
            }

            // When culling, halve the use count of stale colliding entries and
            // evict them once it reaches zero.
            if (cullCollisions && (::time(0) - indices[position].lastUsedTime) > 60) {
                indices[position].useCount >>= 1;
                if (indices[position].useCount == 0) {
                    kDebug(ksdcArea()) << "Overwriting existing old cached entry due to collision.";
                    d->shm->removeEntry(position); // Remove it first
                    break;
                }
            }

            position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
                       % d->shm->indexTableSize();
            probeNumber++;
        }

        if (indices[position].useCount > 0 && indices[position].firstPage >= 0) {
            kDebug(ksdcArea()) << "Overwriting existing cached entry due to collision.";
            d->shm->removeEntry(position); // Remove it first
        }

        // The entry is stored as the null-terminated UTF-8 key followed by the data.
        uint fileNameLength = 1 + encodedKey.length();
        uint requiredSize = fileNameLength + data.size();
        uint pagesNeeded = intCeil(requiredSize, d->shm->cachePageSize());
        uint firstPage = (uint) -1;

        if (pagesNeeded >= d->shm->pageTableSize()) {
            return false;
        }

        // Find free pages, making room if necessary.
        if (pagesNeeded > d->shm->cacheAvail ||
            (firstPage = d->shm->findEmptyPages(pagesNeeded)) >= d->shm->pageTableSize())
        {
            // Ask for enough for this entry plus some breathing room.
            uint freePagesDesired = 3 * qMax(1u, pagesNeeded / 2);

            if (d->shm->cacheAvail > freePagesDesired) {
                // Enough free space exists, it is just badly fragmented.
                d->shm->defragment();
                firstPage = d->shm->findEmptyPages(pagesNeeded);
            }
            else {
                // Not enough free space; evict other entries as well.
                d->shm->removeUsedPages(qMin(2 * freePagesDesired, d->shm->pageTableSize())
                                        - d->shm->cacheAvail);
                firstPage = d->shm->findEmptyPages(pagesNeeded);
            }

            if (firstPage >= d->shm->pageTableSize() ||
                d->shm->cacheAvail < pagesNeeded)
            {
                return false;
            }
        }

        // Update the page table.
        PageTableEntry *table = d->shm->pageTable();
        for (uint i = 0; i < pagesNeeded; ++i) {
            table[firstPage + i].index = position;
        }

        // Update the index.
        indices[position].fileNameHash = keyHash;
        indices[position].totalItemSize = requiredSize;
        indices[position].useCount = 1;
        indices[position].addTime = ::time(0);
        indices[position].lastUsedTime = indices[position].addTime;
        indices[position].firstPage = firstPage;

        d->shm->cacheAvail -= pagesNeeded;

        // Copy the key and data into the reserved pages.
        void *dataPage = d->shm->page(firstPage);
        if (KDE_ISUNLIKELY(!dataPage)) {
            throw KSDCCorrupted();
        }

        d->verifyProposedMemoryAccess(dataPage, requiredSize);

        uchar *startOfPageData = reinterpret_cast<uchar *>(dataPage);
        ::memcpy(startOfPageData, encodedKey.constData(), fileNameLength);
        ::memcpy(startOfPageData + fileNameLength, data.constData(), data.size());

        return true;
    }
    catch(KSDCCorrupted) {
        d->recoverCorruptedCache();
        return false;
    }
}
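// Entry layout used by insert() above and find() below: each entry occupies a
// contiguous run of pages, starting with the null-terminated UTF-8 key and
// followed immediately by the raw payload bytes; find() skips the key before
// handing the payload back as a QByteArray.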
bool KSharedDataCache::find(const QString &key, QByteArray *destination) const
{
    try {
        Private::CacheLocker lock(d);
        if (lock.failed()) {
            return false;
        }

        // Search the index for our data.
        QByteArray encodedKey = key.toUtf8();
        qint32 entry = d->shm->findNamedEntry(encodedKey);
        if (entry >= 0) {
            const IndexTableEntry *header = &d->shm->indexTable()[entry];
            const void *resultPage = d->shm->page(header->firstPage);
            if (KDE_ISUNLIKELY(!resultPage)) {
                throw KSDCCorrupted();
            }

            d->verifyProposedMemoryAccess(resultPage, header->totalItemSize);

            header->lastUsedTime = ::time(0);

            // The item is the null-terminated key followed immediately by the
            // data, so skip past the key and its terminating null byte.
            const char *cacheData = reinterpret_cast<const char *>(resultPage);
            cacheData += encodedKey.size();
            cacheData++;

            *destination = QByteArray(cacheData, header->totalItemSize - encodedKey.size() - 1);
            return true;
        }
    }
    catch(KSDCCorrupted) {
        d->recoverCorruptedCache();
    }

    return false;
}
void KSharedDataCache::clear()
{
    try {
        Private::CacheLocker lock(d);
        if (!lock.failed()) {
            d->shm->clear();
        }
    }
    catch(KSDCCorrupted) {
        d->recoverCorruptedCache();
    }
}
bool KSharedDataCache::contains(const QString &key) const
{
    try {
        Private::CacheLocker lock(d);
        if (lock.failed()) {
            return false;
        }
        return d->shm->findNamedEntry(key.toUtf8()) >= 0;
    }
    catch(KSDCCorrupted) {
        d->recoverCorruptedCache();
        return false;
    }
}
    QFile::remove(cachePath);
unsigned KSharedDataCache::totalSize() const
{
    try {
        Private::CacheLocker lock(d);
        if (lock.failed()) {
            return 0u;
        }
        return d->shm->cacheSize;
    }
    catch(KSDCCorrupted) {
        d->recoverCorruptedCache();
        return 0u;
    }
}
unsigned KSharedDataCache::freeSize() const
{
    try {
        Private::CacheLocker lock(d);
        if (lock.failed()) {
            return 0u;
        }
        return d->shm->cacheAvail * d->shm->cachePageSize();
    }
    catch(KSDCCorrupted) {
        d->recoverCorruptedCache();
        return 0u;
    }
}
    return static_cast<EvictionPolicy>(d->shm->evictionPolicy.fetchAndAddAcquire(0));

    d->shm->evictionPolicy.fetchAndStoreRelease(static_cast<int>(newPolicy));

    return static_cast<unsigned>(d->shm->cacheTimestamp.fetchAndAddAcquire(0));

    d->shm->cacheTimestamp.fetchAndStoreRelease(static_cast<int>(newTimestamp));
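// A minimal usage sketch of the public API implemented above (the cache name
// "example-cache" and the sizes are illustrative only):
//
//   KSharedDataCache cache(QLatin1String("example-cache"),
//                          10 * 1024 * 1024 /* default cache size */,
//                          4096 /* expected item size */);
//   cache.insert(QLatin1String("some-key"), QByteArray("some bytes"));
//
//   QByteArray value;
//   if (cache.find(QLatin1String("some-key"), &value)) {
//       // value now holds "some bytes", possibly written by another process.
//   }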