kshareddatacache.cpp
1 /*
2  * This file is part of the KDE project.
3  * Copyright © 2010, 2012 Michael Pyne <mpyne@kde.org>
4  * Copyright © 2012 Ralf Jung <ralfjung-e@gmx.de>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Library General Public
8  * License version 2 as published by the Free Software Foundation.
9  *
10  * This library includes "MurmurHash" code from Austin Appleby, which is
11  * placed in the public domain. See http://sites.google.com/site/murmurhash/
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Library General Public License for more details.
17  *
18  * You should have received a copy of the GNU Library General Public License
19  * along with this library; see the file COPYING.LIB. If not, write to
20  * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
21  * Boston, MA 02110-1301, USA.
22  */
23 
24 #include "kshareddatacache.h"
25 #include "kshareddatacache_p.h" // Various auxiliary support code
26 
27 #include <kdebug.h>
28 #include <kglobal.h>
29 #include <kstandarddirs.h>
30 #include <krandom.h>
31 
32 #include <QtCore/QDateTime>
33 #include <QtCore/QSharedPointer>
34 #include <QtCore/QByteArray>
35 #include <QtCore/QFile>
36 #include <QtCore/QAtomicInt>
37 #include <QtCore/QList>
38 #include <QtCore/QMutex>
39 #include <QtCore/QMutexLocker>
40 
41 #include <sys/types.h>
42 #include <sys/mman.h>
43 #include <stdlib.h>
44 
47 static const uint MAX_PROBE_COUNT = 6;
48 
49 int ksdcArea()
50 {
51  static int s_ksdcArea = KDebug::registerArea("KSharedDataCache", false);
52  return s_ksdcArea;
53 }
54 
63 class KSDCCorrupted
64 {
65  public:
66  KSDCCorrupted()
67  {
68  kError(ksdcArea()) << "Error detected in cache, re-generating";
69  }
70 };
71 
72 //-----------------------------------------------------------------------------
73 // MurmurHashAligned, by Austin Appleby
74 // (Released to the public domain, or licensed under the MIT license where
75 // software may not be released to the public domain. See
76 // http://sites.google.com/site/murmurhash/)
77 
78 // Same algorithm as MurmurHash, but only does aligned reads - should be safer
79 // on certain platforms.
80 static unsigned int MurmurHashAligned(const void *key, int len, unsigned int seed)
81 {
82  const unsigned int m = 0xc6a4a793;
83  const int r = 16;
84 
85  const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
86 
87  unsigned int h = seed ^ (len * m);
88 
89  int align = reinterpret_cast<quintptr>(data) & 3;
90 
91  if(align && (len >= 4))
92  {
93  // Pre-load the temp registers
94 
95  unsigned int t = 0, d = 0;
96 
97  switch(align)
98  {
99  case 1: t |= data[2] << 16;
100  case 2: t |= data[1] << 8;
101  case 3: t |= data[0];
102  }
103 
104  t <<= (8 * align);
105 
106  data += 4-align;
107  len -= 4-align;
108 
109  int sl = 8 * (4-align);
110  int sr = 8 * align;
111 
112  // Mix
113 
114  while(len >= 4)
115  {
116  d = *reinterpret_cast<const unsigned int *>(data);
117  t = (t >> sr) | (d << sl);
118  h += t;
119  h *= m;
120  h ^= h >> r;
121  t = d;
122 
123  data += 4;
124  len -= 4;
125  }
126 
127  // Handle leftover data in temp registers
128 
129  int pack = len < align ? len : align;
130 
131  d = 0;
132 
133  switch(pack)
134  {
135  case 3: d |= data[2] << 16;
136  case 2: d |= data[1] << 8;
137  case 1: d |= data[0];
138  case 0: h += (t >> sr) | (d << sl);
139  h *= m;
140  h ^= h >> r;
141  }
142 
143  data += pack;
144  len -= pack;
145  }
146  else
147  {
148  while(len >= 4)
149  {
150  h += *reinterpret_cast<const unsigned int *>(data);
151  h *= m;
152  h ^= h >> r;
153 
154  data += 4;
155  len -= 4;
156  }
157  }
158 
159  //----------
160  // Handle tail bytes
161 
162  switch(len)
163  {
164  case 3: h += data[2] << 16;
165  case 2: h += data[1] << 8;
166  case 1: h += data[0];
167  h *= m;
168  h ^= h >> r;
169  };
170 
171  h *= m;
172  h ^= h >> 10;
173  h *= m;
174  h ^= h >> 17;
175 
176  return h;
177 }
178 
183 static quint32 generateHash(const QByteArray &buffer)
184 {
185  // The final constant is the "seed" for MurmurHash. Do *not* change it
186  // without incrementing the cache version.
187  return MurmurHashAligned(buffer.data(), buffer.size(), 0xF0F00F0F);
188 }
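// The callers below (fileNameHash(), findNamedEntry() and insert()) hash the
// UTF-8 form of the user-visible key exactly once and reuse that value, along
// the lines of:
//
//   QByteArray encodedKey = key.toUtf8();
//   quint32 keyHash = generateHash(encodedKey);
//   uint position = keyHash % indexTableSize(); // initial index-table slot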
189 
190 // Alignment concerns become a big deal when we're dealing with shared memory,
191 // since trying to access a structure sized at, say, 8 bytes at an address that
192 // is not evenly divisible by 8 is a crash-inducing error on some
193 // architectures. The compiler would normally take care of this, but with
194 // shared memory the compiler will not necessarily know the alignment expected,
195 // so make sure we account for this ourselves. To do so we need a way to find
196 // out the expected alignment. Enter ALIGNOF...
197 #ifndef ALIGNOF
198 #if defined(Q_CC_GNU) || defined(Q_CC_SUN)
199 #define ALIGNOF(x) (__alignof__ (x)) // GCC provides what we want directly
200 #else
201 
202 #include <stddef.h> // offsetof
203 
204 template<class T>
205 struct __alignmentHack
206 {
207  char firstEntry;
208  T obj;
209  static const size_t size = offsetof(__alignmentHack, obj);
210 };
211 #define ALIGNOF(x) (__alignmentHack<x>::size)
212 #endif // Non gcc
213 #endif // ALIGNOF undefined
214 
215 // Returns a pointer properly aligned to handle size alignment.
216 // size should be a power of 2. start is assumed to be the lowest
217 // permissible address, therefore the return value will be >= start.
218 template<class T>
219 T* alignTo(const void *start, uint size = ALIGNOF(T))
220 {
221  quintptr mask = size - 1;
222 
223  // Cast to int-type to handle bit-twiddling
224  quintptr basePointer = reinterpret_cast<quintptr>(start);
225 
226  // If (and only if) we are already aligned, adding mask into basePointer
227  // will not increment any of the bits in ~mask and we get the right answer.
228  basePointer = (basePointer + mask) & ~mask;
229 
230  return reinterpret_cast<T *>(basePointer);
231 }
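// A worked example of the rounding above:
// alignTo<quint64>(reinterpret_cast<void *>(0x1003), 8) yields 0x1008, since
// (0x1003 + 7) & ~7 == 0x1008, while an already-aligned 0x1008 is returned
// unchanged because adding the mask does not disturb any bits outside it.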
232 
239 template<class T>
240 const T *offsetAs(const void *const base, qint32 offset)
241 {
242  const char *ptr = reinterpret_cast<const char*>(base);
243  return alignTo<const T>(ptr + offset);
244 }
245 
246 // Same as above, but for non-const objects
247 template<class T>
248 T *offsetAs(void *const base, qint32 offset)
249 {
250  char *ptr = reinterpret_cast<char *>(base);
251  return alignTo<T>(ptr + offset);
252 }
253 
259 unsigned intCeil(unsigned a, unsigned b)
260 {
261  if (KDE_ISUNLIKELY(b == 0)) {
262  throw KSDCCorrupted();
263  }
264 
265  return (a + b - 1) / b;
266 }
267 
271 static unsigned countSetBits(unsigned value)
272 {
273  // K&R / Wegner's algorithm used. GCC supports __builtin_popcount but we
274  // expect there to always be only 1 bit set so this should be perhaps a bit
275  // faster 99.9% of the time.
276  unsigned count = 0;
277  for (count = 0; value != 0; count++) {
278  value &= (value - 1); // Clears least-significant set bit.
279  }
280  return count;
281 }
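// Worked examples of the two helpers above: intCeil(10000, 4096) == 3, since
// 10000 bytes need three 4096-byte pages, and countSetBits(4096) == 1, which
// is how cachePageSize() below recognizes a valid power-of-two page size.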
282 
283 typedef qint32 pageID;
284 
285 // =========================================================================
286 // Description of the cache:
287 //
288 // The shared memory cache is designed to be handled as two separate objects,
289 // all contained in the same global memory segment. First off, there is the
290 // basic header data, consisting of the global header followed by the
291 // accounting data necessary to hold items (described in more detail
292 // momentarily). Following the accounting data is the start of the "page table"
293 // (essentially just as you'd see it in an Operating Systems text).
294 //
295 // The page table contains shared memory split into fixed-size pages, with a
296 // configurable page size. In the event that the data is too large to fit into
297 // a single logical page, it will need to occupy consecutive pages of memory.
298 //
299 // The accounting data that was referenced earlier is split into two:
300 //
301 // 1. index table, containing a fixed-size list of possible cache entries.
302 // Each index entry is of type IndexTableEntry (below), and holds the various
303 // accounting data and a pointer to the first page.
304 //
305 // 2. page table, which is used to speed up the process of searching for
306 // free pages of memory. There is one entry for every page in the page table,
307 // and it contains the index of the one entry in the index table actually
308 // holding the page (or <0 if the page is free).
309 //
310 // The entire segment looks like so:
311 // ╔════════╤═════════════╤════════════╤═══════╤═══════╤═══════╤═══════╤═══╗
312 // ║ Header │ Index Table │ Page Table │ Pages │       │       │       │...║
313 // ╚════════╧═════════════╧════════════╧═══════╧═══════╧═══════╧═══════╧═══╝
314 // =========================================================================
315 
316 // All elements of this struct must be "plain old data" (POD) types since it
317 // will be in shared memory. In addition, no pointers! To point to something
318 // you must use relative offsets since the pointer start addresses will be
319 // different in each process.
320 struct IndexTableEntry
321 {
322  uint fileNameHash;
323  uint totalItemSize; // in bytes
324  mutable uint useCount;
325  time_t addTime;
326  mutable time_t lastUsedTime;
327  pageID firstPage;
328 };
329 
330 // Page table entry
331 struct PageTableEntry
332 {
333  // int so we can use values <0 for unassigned pages.
334  qint32 index;
335 };
336 
337 // Each individual page contains the cached data. The first page starts off with
338 // the utf8-encoded key, a null '\0', and then the data follows immediately
339 // from the next byte, possibly crossing consecutive page boundaries to hold
340 // all of the data.
341 // There is, however, no specific struct for a page, it is simply a location in
342 // memory.
343 
344 // This is effectively the layout of the shared memory segment. The variables
345 // contained within form the header, data contained afterwards is pointed to
346 // by using special accessor functions.
347 struct SharedMemory
348 {
356  enum {
357  PIXMAP_CACHE_VERSION = 12,
358  MINIMUM_CACHE_SIZE = 4096
359  };
360 
361  // Note to those who follow me. You should not, under any circumstances, ever
362  // re-arrange the following two fields, even if you change the version number
363  // for later revisions of this code.
364  QAtomicInt ready;
365  quint8 version;
366 
367  // See kshareddatacache_p.h
368  SharedLock shmLock;
369 
370  uint cacheSize;
371  uint cacheAvail;
372  QAtomicInt evictionPolicy;
373 
374  // pageSize and cacheSize determine the number of pages. The number of
375  // pages determines the page table size and (indirectly) the index table
376  // size.
377  QAtomicInt pageSize;
378 
379  // This variable is added to reserve space for later cache timestamping
380  // support. The idea is this variable will be updated when the cache is
381  // written to, to allow clients to detect a changed cache quickly.
382  QAtomicInt cacheTimestamp;
383 
387  static unsigned equivalentPageSize(unsigned itemSize)
388  {
389  if (itemSize == 0) {
390  return 4096; // Default average item size.
391  }
392 
393  int log2OfSize = 0;
394  while ((itemSize >>= 1) != 0) {
395  log2OfSize++;
396  }
397 
398  // Bound page size between 512 bytes and 256 KiB.
399  // If this is adjusted, also alter validSizeMask in cachePageSize
400  log2OfSize = qBound(9, log2OfSize, 18);
401 
402  return (1 << log2OfSize);
403  }
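// For instance, an expectedItemSize of 10000 gives floor(log2(10000)) == 13,
// so equivalentPageSize(10000) returns 8192. Any non-zero size below 512
// bytes is clamped up to the 512-byte minimum page, and anything of 512 KiB
// or more is clamped down to the 256 KiB maximum.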
404 
405  // Returns pageSize in unsigned format.
406  unsigned cachePageSize() const
407  {
408  unsigned _pageSize = static_cast<unsigned>(pageSize);
409  // bits 9-18 may be set.
410  static const unsigned validSizeMask = 0x7FE00u;
411 
412  // Check for page sizes that are not a power-of-2, or are too low/high.
413  if (KDE_ISUNLIKELY(countSetBits(_pageSize) != 1 || (_pageSize & ~validSizeMask))) {
414  throw KSDCCorrupted();
415  }
416 
417  return _pageSize;
418  }
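// Example of the validation above: a stored pageSize of 4096 (bit 12 only)
// passes both tests, while a corrupted value such as 4095 (many bits set) or
// 1 << 20 (a single bit outside validSizeMask) throws KSDCCorrupted.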
419 
432  bool performInitialSetup(uint _cacheSize, uint _pageSize)
433  {
434  if (_cacheSize < MINIMUM_CACHE_SIZE) {
435  kError(ksdcArea()) << "Internal error: Attempted to create a cache sized < "
436  << MINIMUM_CACHE_SIZE;
437  return false;
438  }
439 
440  if (_pageSize == 0) {
441  kError(ksdcArea()) << "Internal error: Attempted to create a cache with 0-sized pages.";
442  return false;
443  }
444 
445  shmLock.type = findBestSharedLock();
446  if (shmLock.type == LOCKTYPE_INVALID) {
447  kError(ksdcArea()) << "Unable to find an appropriate lock to guard the shared cache. "
448  << "This *should* be essentially impossible. :(";
449  return false;
450  }
451 
452  bool isProcessShared = false;
453  QSharedPointer<KSDCLock> tempLock(createLockFromId(shmLock.type, shmLock));
454 
455  if (!tempLock->initialize(isProcessShared)) {
456  kError(ksdcArea()) << "Unable to initialize the lock for the cache!";
457  return false;
458  }
459 
460  if (!isProcessShared) {
461  kWarning(ksdcArea()) << "Cache initialized, but does not support being"
462  << "shared across processes.";
463  }
464 
465  // These must be updated to make some of our auxiliary functions
466  // work right since their values will be based on the cache size.
467  cacheSize = _cacheSize;
468  pageSize = _pageSize;
469  version = PIXMAP_CACHE_VERSION;
470  cacheTimestamp = static_cast<unsigned>(::time(0));
471 
472  clearInternalTables();
473 
474  // Unlock the mini-lock, and introduce a total memory barrier to make
475  // sure all changes have propagated even without a mutex.
476  ready.ref();
477 
478  return true;
479  }
480 
481  void clearInternalTables()
482  {
483  // Assumes we're already locked somehow.
484  cacheAvail = pageTableSize();
485 
486  // Setup page tables to point nowhere
487  PageTableEntry *table = pageTable();
488  for (uint i = 0; i < pageTableSize(); ++i) {
489  table[i].index = -1;
490  }
491 
492  // Setup index tables to be accurate.
493  IndexTableEntry *indices = indexTable();
494  for (uint i = 0; i < indexTableSize(); ++i) {
495  indices[i].firstPage = -1;
496  indices[i].useCount = 0;
497  indices[i].fileNameHash = 0;
498  indices[i].totalItemSize = 0;
499  indices[i].addTime = 0;
500  indices[i].lastUsedTime = 0;
501  }
502  }
503 
504  const IndexTableEntry *indexTable() const
505  {
506  // Index Table goes immediately after this struct, at the first byte
507  // where alignment constraints are met (accounted for by offsetAs).
508  return offsetAs<IndexTableEntry>(this, sizeof(*this));
509  }
510 
511  const PageTableEntry *pageTable() const
512  {
513  const IndexTableEntry *base = indexTable();
514  base += indexTableSize();
515 
516  // Let's call wherever we end up the start of the page table...
517  return alignTo<PageTableEntry>(base);
518  }
519 
520  const void *cachePages() const
521  {
522  const PageTableEntry *tableStart = pageTable();
523  tableStart += pageTableSize();
524 
525  // Let's call wherever we end up the start of the data...
526  return alignTo<void>(tableStart, cachePageSize());
527  }
528 
529  const void *page(pageID at) const
530  {
531  if (static_cast<uint>(at) >= pageTableSize()) {
532  return 0;
533  }
534 
535  // We must manually calculate this one since pageSize varies.
536  const char *pageStart = reinterpret_cast<const char *>(cachePages());
537  pageStart += (at * cachePageSize());
538 
539  return reinterpret_cast<const void *>(pageStart);
540  }
541 
542  // The following are non-const versions of some of the methods defined
543  // above. They use const_cast<> because I feel that is better than
544  // duplicating the code. I suppose template member functions (?)
545  // may work, may investigate later.
546  IndexTableEntry *indexTable()
547  {
548  const SharedMemory *that = const_cast<const SharedMemory*>(this);
549  return const_cast<IndexTableEntry *>(that->indexTable());
550  }
551 
552  PageTableEntry *pageTable()
553  {
554  const SharedMemory *that = const_cast<const SharedMemory*>(this);
555  return const_cast<PageTableEntry *>(that->pageTable());
556  }
557 
558  void *cachePages()
559  {
560  const SharedMemory *that = const_cast<const SharedMemory*>(this);
561  return const_cast<void *>(that->cachePages());
562  }
563 
564  void *page(pageID at)
565  {
566  const SharedMemory *that = const_cast<const SharedMemory*>(this);
567  return const_cast<void *>(that->page(at));
568  }
569 
570  uint pageTableSize() const
571  {
572  return cacheSize / cachePageSize();
573  }
574 
575  uint indexTableSize() const
576  {
577  // Assume 2 pages on average are needed -> the number of entries
578  // would be half of the number of pages.
579  return pageTableSize() / 2;
580  }
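// Putting the two sizes together: a 1 MiB cache with 4096-byte pages has
// pageTableSize() == 256 and indexTableSize() == 128, i.e. room for at most
// 128 distinct entries spread over 256 pages.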
581 
586  pageID findEmptyPages(uint pagesNeeded) const
587  {
588  if (KDE_ISUNLIKELY(pagesNeeded > pageTableSize())) {
589  return pageTableSize();
590  }
591 
592  // Loop through the page table, find the first empty page, and just
593  // make sure that there are enough free pages.
594  const PageTableEntry *table = pageTable();
595  uint contiguousPagesFound = 0;
596  pageID base = 0;
597  for (pageID i = 0; i < static_cast<int>(pageTableSize()); ++i) {
598  if (table[i].index < 0) {
599  if (contiguousPagesFound == 0) {
600  base = i;
601  }
602  contiguousPagesFound++;
603  }
604  else {
605  contiguousPagesFound = 0;
606  }
607 
608  if (contiguousPagesFound == pagesNeeded) {
609  return base;
610  }
611  }
612 
613  return pageTableSize();
614  }
615 
616  // left < right?
617  static bool lruCompare(const IndexTableEntry &l, const IndexTableEntry &r)
618  {
619  // Ensure invalid entries migrate to the end
620  if (l.firstPage < 0 && r.firstPage >= 0) {
621  return false;
622  }
623  if (l.firstPage >= 0 && r.firstPage < 0) {
624  return true;
625  }
626 
627  // Most recently used will have the highest absolute time =>
628  // least recently used (lowest) should go first => use left < right
629  return l.lastUsedTime < r.lastUsedTime;
630  }
631 
632  // left < right?
633  static bool seldomUsedCompare(const IndexTableEntry &l, const IndexTableEntry &r)
634  {
635  // Ensure invalid entries migrate to the end
636  if (l.firstPage < 0 && r.firstPage >= 0) {
637  return false;
638  }
639  if (l.firstPage >= 0 && r.firstPage < 0) {
640  return true;
641  }
642 
643  // Put lowest use count at start by using left < right
644  return l.useCount < r.useCount;
645  }
646 
647  // left < right?
648  static bool ageCompare(const IndexTableEntry &l, const IndexTableEntry &r)
649  {
650  // Ensure invalid entries migrate to the end
651  if (l.firstPage < 0 && r.firstPage >= 0) {
652  return false;
653  }
654  if (l.firstPage >= 0 && r.firstPage < 0) {
655  return true;
656  }
657 
658  // Oldest entries die first -- they have the lowest absolute add time,
659  // so just like the others use left < right
660  return l.addTime < r.addTime;
661  }
662 
663  void defragment()
664  {
665  if (cacheAvail * cachePageSize() == cacheSize) {
666  return; // That was easy
667  }
668 
669  kDebug(ksdcArea()) << "Defragmenting the shared cache";
670 
671  // Just do a linear scan, and anytime there is free space, swap it
672  // with the pages to its right. In order to meet the precondition
673  // we need to skip any used pages first.
674 
675  pageID currentPage = 0;
676  pageID idLimit = static_cast<pageID>(pageTableSize());
677  PageTableEntry *pages = pageTable();
678 
679  if (KDE_ISUNLIKELY(!pages || idLimit <= 0)) {
680  throw KSDCCorrupted();
681  }
682 
683  // Skip used pages
684  while (currentPage < idLimit && pages[currentPage].index >= 0) {
685  ++currentPage;
686  }
687 
688  pageID freeSpot = currentPage;
689 
690  // Main loop, starting from a free page, skip to the used pages and
691  // move them back.
692  while (currentPage < idLimit) {
693  // Find the next used page
694  while (currentPage < idLimit && pages[currentPage].index < 0) {
695  ++currentPage;
696  }
697 
698  if (currentPage >= idLimit) {
699  break;
700  }
701 
702  // Found an entry, move it.
703  qint32 affectedIndex = pages[currentPage].index;
704  if (KDE_ISUNLIKELY(affectedIndex < 0 ||
705  affectedIndex >= idLimit ||
706  indexTable()[affectedIndex].firstPage != currentPage))
707  {
708  throw KSDCCorrupted();
709  }
710 
711  indexTable()[affectedIndex].firstPage = freeSpot;
712 
713  // Moving one page at a time guarantees we can use memcpy safely
714  // (in other words, the source and destination will not overlap).
715  while (currentPage < idLimit && pages[currentPage].index >= 0) {
716  const void *const sourcePage = page(currentPage);
717  void *const destinationPage = page(freeSpot);
718 
719  if(KDE_ISUNLIKELY(!sourcePage || !destinationPage)) {
720  throw KSDCCorrupted();
721  }
722 
723  ::memcpy(destinationPage, sourcePage, cachePageSize());
724  pages[freeSpot].index = affectedIndex;
725  pages[currentPage].index = -1;
726  ++currentPage;
727  ++freeSpot;
728 
729  // If we've just moved the very last page and it happened to
730  // be at the very end of the cache then we're done.
731  if (currentPage >= idLimit) {
732  break;
733  }
734 
735  // We're moving consecutive used pages whether they belong to
736  // our affected entry or not, so detect if we've started moving
737  // the data for a different entry and adjust if necessary.
738  if (affectedIndex != pages[currentPage].index) {
739  indexTable()[pages[currentPage].index].firstPage = freeSpot;
740  }
741  affectedIndex = pages[currentPage].index;
742  }
743 
744  // At this point currentPage is on a page that is unused, and the
745  // cycle repeats. However, currentPage is not the first unused
746  // page, freeSpot is, so leave it alone.
747  }
748  }
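// A small illustration of the compaction above, writing each page-table slot
// as the entry it belongs to ('.' meaning free):
//
//   before: [ A A . . B B B . C ]
//   after:  [ A A B B B C . . . ]
//
// freeSpot trails currentPage, used runs are copied down one page at a time,
// and each entry's firstPage is patched as soon as its run starts to move.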
749 
756  qint32 findNamedEntry(const QByteArray &key) const
757  {
758  uint keyHash = generateHash(key);
759  uint position = keyHash % indexTableSize();
760  uint probeNumber = 1; // See insert() for description
761 
762  // Imagine 3 entries A, B, C in this logical probing chain. If B is
763  // later removed then we can't find C either. So, we must keep
764  // searching for up to MAX_PROBE_COUNT tries (or until we find the item,
765  // obviously).
766  while (indexTable()[position].fileNameHash != keyHash &&
767  probeNumber < MAX_PROBE_COUNT)
768  {
769  position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
770  % indexTableSize();
771  probeNumber++;
772  }
773 
774  if (indexTable()[position].fileNameHash == keyHash) {
775  pageID firstPage = indexTable()[position].firstPage;
776  if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) {
777  return -1;
778  }
779 
780  const void *resultPage = page(firstPage);
781  if (KDE_ISUNLIKELY(!resultPage)) {
782  throw KSDCCorrupted();
783  }
784 
785  const char *utf8FileName = reinterpret_cast<const char *>(resultPage);
786  if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) {
787  return position;
788  }
789  }
790 
791  return -1; // Not found, or a different one found.
792  }
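// The probe sequence above is the same quadratic one used by insert():
// position = (keyHash + (i + i*i) / 2) % indexTableSize(). For i = 1..5 the
// offsets from the initial slot are 1, 3, 6, 10 and 15, so with
// MAX_PROBE_COUNT == 6 a key is looked for in at most six distinct slots
// before we give up.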
793 
794  // Function to use with QSharedPointer in removeUsedPages below...
795  static void deleteTable(IndexTableEntry *table) {
796  delete [] table;
797  }
798 
809  uint removeUsedPages(uint numberNeeded)
810  {
811  if (numberNeeded == 0) {
812  kError(ksdcArea()) << "Internal error: Asked to remove exactly 0 pages for some reason.";
813  throw KSDCCorrupted();
814  }
815 
816  if (numberNeeded > pageTableSize()) {
817  kError(ksdcArea()) << "Internal error: Requested more space than exists in the cache.";
818  kError(ksdcArea()) << numberNeeded << "requested, " << pageTableSize() << "is the total possible.";
819  throw KSDCCorrupted();
820  }
821 
822  // If the cache free space is large enough we will defragment first
823  // instead since it's likely we're highly fragmented.
824  // Otherwise, we will (eventually) simply remove entries per the
825  // eviction order set for the cache until there is enough room
826  // available to hold the number of pages we need.
827 
828  kDebug(ksdcArea()) << "Removing old entries to free up" << numberNeeded << "pages,"
829  << cacheAvail << "are already theoretically available.";
830 
831  if (cacheAvail > 3 * numberNeeded) {
832  defragment();
833  uint result = findEmptyPages(numberNeeded);
834 
835  if (result < pageTableSize()) {
836  return result;
837  }
838  else {
839  kError(ksdcArea()) << "Just defragmented a locked cache, but still there"
840  << "isn't enough room for the current request.";
841  }
842  }
843 
844  // At this point we know we'll have to free some space up, so sort our
845  // list of entries by whatever the current criteria are and start
846  // killing expired entries.
847  QSharedPointer<IndexTableEntry> tablePtr(new IndexTableEntry[indexTableSize()], deleteTable);
848 
849  if (!tablePtr) {
850  kError(ksdcArea()) << "Unable to allocate temporary memory for sorting the cache!";
851  clearInternalTables();
852  throw KSDCCorrupted();
853  }
854 
855  // We use tablePtr to ensure the data is destroyed, but do the access
856  // via a helper pointer to allow for array ops.
857  IndexTableEntry *table = tablePtr.data();
858 
859  ::memcpy(table, indexTable(), sizeof(IndexTableEntry) * indexTableSize());
860 
861  // Our entry ID is simply its index into the
862  // index table, which qSort will rearrange all willy-nilly, so first
863  // we'll save the *real* entry ID into firstPage (which is useless in
864  // our copy of the index table). On the other hand if the entry is not
865  // used then we note that with -1.
866  for (uint i = 0; i < indexTableSize(); ++i) {
867  table[i].firstPage = table[i].useCount > 0 ? static_cast<pageID>(i)
868  : -1;
869  }
870 
871  // Declare the comparison function that we'll use to pass to qSort,
872  // based on our cache eviction policy.
873  bool (*compareFunction)(const IndexTableEntry &, const IndexTableEntry &);
874  switch((int) evictionPolicy) {
875  case (int) KSharedDataCache::EvictLeastOftenUsed:
876  case (int) KSharedDataCache::NoEvictionPreference:
877  default:
878  compareFunction = seldomUsedCompare;
879  break;
880 
881  case (int) KSharedDataCache::EvictLeastRecentlyUsed:
882  compareFunction = lruCompare;
883  break;
884 
885  case (int) KSharedDataCache::EvictOldest:
886  compareFunction = ageCompare;
887  break;
888  }
889 
890  qSort(table, table + indexTableSize(), compareFunction);
891 
892  // Least recently used entries will be in the front.
893  // Start killing until we have room.
894 
895  // Note on removeEntry: It expects an index into the index table,
896  // but our sorted list is all jumbled. But we stored the real index
897  // in the firstPage member.
898  // Remove entries until we've removed at least the required number
899  // of pages.
900  uint i = 0;
901  while (i < indexTableSize() && numberNeeded > cacheAvail) {
902  int curIndex = table[i++].firstPage; // Really an index, not a page
903 
904  // Removed everything, still no luck (or curIndex is set but too high).
905  if (curIndex < 0 || static_cast<uint>(curIndex) >= indexTableSize()) {
906  kError(ksdcArea()) << "Trying to remove index" << curIndex
907  << "out-of-bounds for index table of size" << indexTableSize();
908  throw KSDCCorrupted();
909  }
910 
911  kDebug(ksdcArea()) << "Removing entry of" << indexTable()[curIndex].totalItemSize
912  << "size";
913  removeEntry(curIndex);
914  }
915 
916  // At this point let's see if we have freed up enough data by
917  // defragmenting first and seeing if we can find that free space.
918  defragment();
919 
920  pageID result = pageTableSize();
921  while (i < indexTableSize() &&
922  (static_cast<uint>(result = findEmptyPages(numberNeeded))) >= pageTableSize())
923  {
924  int curIndex = table[i++].firstPage;
925 
926  if (curIndex < 0) {
927  // One last shot.
928  defragment();
929  return findEmptyPages(numberNeeded);
930  }
931 
932  if (KDE_ISUNLIKELY(static_cast<uint>(curIndex) >= indexTableSize())) {
933  throw KSDCCorrupted();
934  }
935 
936  removeEntry(curIndex);
937  }
938 
939  // Whew.
940  return result;
941  }
942 
943  // Returns the total size required for a given cache size.
944  static uint totalSize(uint cacheSize, uint effectivePageSize)
945  {
946  uint numberPages = intCeil(cacheSize, effectivePageSize);
947  uint indexTableSize = numberPages / 2;
948 
949  // Knowing the number of pages, we can determine what addresses we'd be
950  // using (properly aligned), and from there determine how much memory
951  // we'd use.
952  IndexTableEntry *indexTableStart =
953  offsetAs<IndexTableEntry>(static_cast<void*>(0), sizeof (SharedMemory));
954 
955  indexTableStart += indexTableSize;
956 
957  PageTableEntry *pageTableStart = reinterpret_cast<PageTableEntry *>(indexTableStart);
958  pageTableStart = alignTo<PageTableEntry>(pageTableStart);
959  pageTableStart += numberPages;
960 
961  // The weird part, we must manually adjust the pointer based on the page size.
962  char *cacheStart = reinterpret_cast<char *>(pageTableStart);
963  cacheStart += (numberPages * effectivePageSize);
964 
965  // ALIGNOF gives pointer alignment
966  cacheStart = alignTo<char>(cacheStart, ALIGNOF(void*));
967 
968  // We've traversed the header, index, page table, and cache.
969  // Wherever we're at now is the size of the enchilada.
970  return static_cast<uint>(reinterpret_cast<quintptr>(cacheStart));
971  }
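// A rough worked example of the walk above: with cacheSize == 1 MiB and
// effectivePageSize == 4096 there are 256 pages and 128 index entries, so the
// returned total is sizeof(SharedMemory) + 128 * sizeof(IndexTableEntry) +
// 256 * sizeof(PageTableEntry) (each start rounded up for alignment) plus
// 256 * 4096 bytes of page data, i.e. slightly more than the 1 MiB of usable
// cache it describes.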
972 
973  uint fileNameHash(const QByteArray &utf8FileName) const
974  {
975  return generateHash(utf8FileName) % indexTableSize();
976  }
977 
978  void clear()
979  {
980  clearInternalTables();
981  }
982 
983  void removeEntry(uint index);
984 };
985 
986 // The per-instance private data, such as map size, whether
987 // attached or not, pointer to shared memory, etc.
988 class KSharedDataCache::Private
989 {
990  public:
991  Private(const QString &name,
992  unsigned defaultCacheSize,
993  unsigned expectedItemSize
994  )
995  : m_cacheName(name)
996  , shm(0)
997  , m_lock(0)
998  , m_mapSize(0)
999  , m_defaultCacheSize(defaultCacheSize)
1000  , m_expectedItemSize(expectedItemSize)
1001  , m_expectedType(LOCKTYPE_INVALID)
1002  {
1003  mapSharedMemory();
1004  }
1005 
1006  // Put the cache in a condition to be able to call mapSharedMemory() by
1007  // completely detaching from shared memory (such as to respond to an
1008  // unrecoverable error).
1009  // m_mapSize must already be set to the amount of memory mapped to shm.
1010  void detachFromSharedMemory()
1011  {
1012  // The lock holds a reference into shared memory, so this must be
1013  // cleared before shm is removed.
1014  m_lock.clear();
1015 
1016  if (shm && ::munmap(shm, m_mapSize) != 0) {
1017  kError(ksdcArea()) << "Unable to unmap shared memory segment"
1018  << static_cast<void*>(shm);
1019  }
1020 
1021  shm = 0;
1022  m_mapSize = 0;
1023  }
1024 
1025  // This function does a lot of the important work: attempting to connect to shared
1026  // memory, falling back to a private anonymous mapping if that fails, and failing
1027  // that, nothing (but the cache remains "valid", we just don't actually do anything).
1028  void mapSharedMemory()
1029  {
1030  // 0-sized caches are fairly useless.
1031  unsigned cacheSize = qMax(m_defaultCacheSize, uint(SharedMemory::MINIMUM_CACHE_SIZE));
1032  unsigned pageSize = SharedMemory::equivalentPageSize(m_expectedItemSize);
1033 
1034  // Ensure that the cache is sized such that there is a minimum number of
1035  // pages available. (i.e. a cache consisting of only 1 page is fairly
1036  // useless and probably crash-prone).
1037  cacheSize = qMax(pageSize * 256, cacheSize);
1038 
1039  // The m_cacheName is used to find the file to store the cache in.
1040  QString cacheName = KGlobal::dirs()->locateLocal("cache", m_cacheName + QLatin1String(".kcache"));
1041  QFile file(cacheName);
1042 
1043  // The basic idea is to open the file that we want to map into shared
1044  // memory, and then actually establish the mapping. Once we have mapped the
1045  // file into shared memory we can close the file handle; the mapping will
1046  // still be maintained (unless the file is resized to be shorter than
1047  // expected, which we don't handle yet :-( )
1048 
1049  // size accounts for the overhead over the desired cacheSize
1050  uint size = SharedMemory::totalSize(cacheSize, pageSize);
1051  void *mapAddress = MAP_FAILED;
1052 
1053  if (size < cacheSize) {
1054  kError(ksdcArea()) << "Asked for a cache size less than requested size somehow -- Logic Error :(";
1055  return;
1056  }
1057 
1058  // We establish the shared memory mapping here only if we will have appropriate
1059  // mutex support (systemSupportsProcessSharing). Then we
1060  // open the file and resize it to some sane value if the file is too small.
1061  if (file.open(QIODevice::ReadWrite) &&
1062  (file.size() >= size ||
1063  (file.resize(size) && ensureFileAllocated(file.handle(), size))))
1064  {
1065  // Use mmap directly instead of QFile::map since the QFile (and its
1066  // shared mapping) will disappear unless we hang onto the QFile for no
1067  // reason (see the note below, we don't care about the file per se...)
1068  mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
1069 
1070  // So... it is possible that someone else has mapped this cache already
1071  // with a larger size. If that's the case we need to at least match
1072  // the size to be able to access every entry, so fixup the mapping.
1073  if (mapAddress != MAP_FAILED) {
1074  SharedMemory *mapped = reinterpret_cast<SharedMemory *>(mapAddress);
1075 
1076  // First make sure that the version of the cache on disk is
1077  // valid. We also need to check that version != 0 to
1078  // disambiguate against an uninitialized cache.
1079  if (mapped->version != SharedMemory::PIXMAP_CACHE_VERSION &&
1080  mapped->version > 0)
1081  {
1082  kWarning(ksdcArea()) << "Deleting wrong version of cache" << cacheName;
1083 
1084  // CAUTION: Potentially recursive since the recovery
1085  // involves calling this function again.
1086  m_mapSize = size;
1087  shm = mapped;
1088  recoverCorruptedCache();
1089  return;
1090  }
1091  else if (mapped->cacheSize > cacheSize) {
1092  // This order is very important. We must save the cache size
1093  // before we remove the mapping, but unmap before overwriting
1094  // the previous mapping size...
1095  cacheSize = mapped->cacheSize;
1096  unsigned actualPageSize = mapped->cachePageSize();
1097  ::munmap(mapAddress, size);
1098  size = SharedMemory::totalSize(cacheSize, actualPageSize);
1099  mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
1100  }
1101  }
1102  }
1103 
1104  // We could be here without the mapping established if:
1105  // 1) Process-shared synchronization is not supported, either at compile or run time,
1106  // 2) Unable to open the required file.
1107  // 3) Unable to resize the file to be large enough.
1108  // 4) Establishing the mapping failed.
1109  // 5) The mapping succeeded, but the size was wrong and we were unable to map when
1110  // we tried again.
1111  // 6) The incorrect version of the cache was detected.
1112  // 7) The file could be created, but posix_fallocate failed to commit it fully to disk.
1113  // In any of these cases, attempt to fall back to the
1114  // better-supported anonymous private page style of mmap. This memory won't
1115  // be shared, but our code will still work the same.
1116  // NOTE: We never use the on-disk representation independently of the
1117  // shared memory. If we don't get shared memory the disk info is ignored,
1118  // if we do get shared memory we never look at disk again.
1119  if (mapAddress == MAP_FAILED) {
1120  kWarning(ksdcArea()) << "Failed to establish shared memory mapping, will fall back"
1121  << "to private memory -- memory usage will increase";
1122 
1123  mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1124  }
1125 
1126  // Well now we're really hosed. We can still work, but we can't even cache
1127  // data.
1128  if (mapAddress == MAP_FAILED) {
1129  kError(ksdcArea()) << "Unable to allocate shared memory segment for shared data cache"
1130  << cacheName << "of size" << cacheSize;
1131  return;
1132  }
1133 
1134  m_mapSize = size;
1135 
1136  // We never actually construct shm, but we assign it the same address as the
1137  // shared memory we just mapped, so effectively shm is now a SharedMemory that
1138  // happens to be located at mapAddress.
1139  shm = reinterpret_cast<SharedMemory *>(mapAddress);
1140 
1141  // If we were first to create this memory map, all data will be 0.
1142  // Therefore if ready == 0 we're not initialized. A fully initialized
1143  // header will have ready == 2. Why?
1144  // Because 0 means "safe to initialize"
1145  // 1 means "in progress of initing"
1146  // 2 means "ready"
1147  uint usecSleepTime = 8; // Start by sleeping for 8 microseconds
1148  while (shm->ready != 2) {
1149  if (KDE_ISUNLIKELY(usecSleepTime >= (1 << 21))) {
1150  // Didn't acquire within ~8 seconds? Assume an issue exists
1151  kError(ksdcArea()) << "Unable to acquire shared lock, is the cache corrupt?";
1152 
1153  file.remove(); // Unlink the cache in case it's corrupt.
1154  detachFromSharedMemory();
1155  return; // Fallback to QCache (later)
1156  }
1157 
1158  if (shm->ready.testAndSetAcquire(0, 1)) {
1159  if (!shm->performInitialSetup(cacheSize, pageSize)) {
1160  kError(ksdcArea()) << "Unable to perform initial setup, this system probably "
1161  "does not really support process-shared pthreads or "
1162  "semaphores, even though it claims otherwise.";
1163 
1164  file.remove();
1165  detachFromSharedMemory();
1166  return;
1167  }
1168  }
1169  else {
1170  usleep(usecSleepTime); // spin
1171 
1172  // Exponential backoff as in Ethernet and similar collision resolution methods
1173  usecSleepTime *= 2;
1174  }
1175  }
1176 
1177  m_expectedType = shm->shmLock.type;
1178  m_lock = QSharedPointer<KSDCLock>(createLockFromId(m_expectedType, shm->shmLock));
1179  bool isProcessSharingSupported = false;
1180 
1181  if (!m_lock->initialize(isProcessSharingSupported)) {
1182  kError(ksdcArea()) << "Unable to setup shared cache lock, although it worked when created.";
1183  detachFromSharedMemory();
1184  }
1185  }
1186 
1187  // Called whenever the cache is apparently corrupt (for instance, a timeout trying to
1188  // lock the cache). In this situation it is safer just to destroy it all and try again.
1189  void recoverCorruptedCache()
1190  {
1191  KSharedDataCache::deleteCache(m_cacheName);
1192 
1193  detachFromSharedMemory();
1194 
1195  // Do this even if we weren't previously cached -- it might work now.
1196  mapSharedMemory();
1197  }
1198 
1199  // This should be called for any memory access to shared memory. This
1200  // function will verify that the bytes [base, base+accessLength) are
1201  // actually mapped to d->shm. The cache itself may have incorrect cache
1202  // page sizes, incorrect cache size, etc. so this function should be called
1203  // despite the cache data indicating it should be safe.
1204  //
1205  // If the access is /not/ safe then a KSDCCorrupted exception will be
1206  // thrown, so be ready to catch that.
1207  void verifyProposedMemoryAccess(const void *base, unsigned accessLength) const
1208  {
1209  quintptr startOfAccess = reinterpret_cast<quintptr>(base);
1210  quintptr startOfShm = reinterpret_cast<quintptr>(shm);
1211 
1212  if (KDE_ISUNLIKELY(startOfAccess < startOfShm)) {
1213  throw KSDCCorrupted();
1214  }
1215 
1216  quintptr endOfShm = startOfShm + m_mapSize;
1217  quintptr endOfAccess = startOfAccess + accessLength;
1218 
1219  // Check for unsigned integer wraparound, and then
1220  // bounds access
1221  if (KDE_ISUNLIKELY((endOfShm < startOfShm) ||
1222  (endOfAccess < startOfAccess) ||
1223  (endOfAccess > endOfShm)))
1224  {
1225  throw KSDCCorrupted();
1226  }
1227  }
1228 
1229  bool lock() const
1230  {
1231  if (KDE_ISLIKELY(shm && shm->shmLock.type == m_expectedType)) {
1232  return m_lock->lock();
1233  }
1234 
1235  // No shm or wrong type --> corrupt!
1236  throw KSDCCorrupted();
1237  }
1238 
1239  void unlock() const
1240  {
1241  m_lock->unlock();
1242  }
1243 
1244  class CacheLocker
1245  {
1246  mutable Private * d;
1247 
1248  bool cautiousLock()
1249  {
1250  int lockCount = 0;
1251 
1252  // Locking can fail due to a timeout. If it happens too often even though
1253  // we're taking corrective action, assume there's some disastrous problem
1254  // and give up.
1255  while (!d->lock()) {
1256  d->recoverCorruptedCache();
1257 
1258  if (!d->shm) {
1259  kWarning(ksdcArea()) << "Lost the connection to shared memory for cache"
1260  << d->m_cacheName;
1261  return false;
1262  }
1263 
1264  if (lockCount++ > 4) {
1265  kError(ksdcArea()) << "There is a very serious problem with the KDE data cache"
1266  << d->m_cacheName << "giving up trying to access cache.";
1267  d->detachFromSharedMemory();
1268  return false;
1269  }
1270  }
1271 
1272  return true;
1273  }
1274 
1275  public:
1276  CacheLocker(const Private *_d) : d(const_cast<Private *>(_d))
1277  {
1278  if (KDE_ISUNLIKELY(!d || !d->shm || !cautiousLock())) {
1279  return;
1280  }
1281 
1282  uint testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());
1283 
1284  // A while loop? Indeed, think what happens if this happens
1285  // twice -- hard to debug race conditions.
1286  while (testSize > d->m_mapSize) {
1287  kDebug(ksdcArea()) << "Someone enlarged the cache on us,"
1288  << "attempting to match new configuration.";
1289 
1290  // Protect against two threads accessing this same KSDC
1291  // from trying to execute the following remapping at the
1292  // same time.
1293  QMutexLocker d_locker(&d->m_threadLock);
1294  if (testSize == d->m_mapSize) {
1295  break; // Bail if the other thread already solved.
1296  }
1297 
1298  // Linux supports mremap, but it's not portable. So,
1299  // drop the map and (try to) re-establish.
1300  d->unlock();
1301 
1302 #ifdef KSDC_MSYNC_SUPPORTED
1303  ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
1304 #endif
1305  ::munmap(d->shm, d->m_mapSize);
1306  d->m_mapSize = 0;
1307  d->shm = 0;
1308 
1309  QFile f(KGlobal::dirs()->locateLocal("cache", d->m_cacheName + QLatin1String(".kcache")));
1310  if (!f.open(QFile::ReadWrite)) {
1311  kError(ksdcArea()) << "Unable to re-open cache, unfortunately"
1312  << "the connection had to be dropped for"
1313  << "crash safety -- things will be much"
1314  << "slower now.";
1315  return;
1316  }
1317 
1318  void *newMap = ::mmap(0, testSize, PROT_READ | PROT_WRITE,
1319  MAP_SHARED, f.handle(), 0);
1320  if (newMap == MAP_FAILED) {
1321  kError(ksdcArea()) << "Unable to re-map the cache into memory"
1322  << "things will be much slower now";
1323  return;
1324  }
1325 
1326  d->shm = reinterpret_cast<SharedMemory *>(newMap);
1327  d->m_mapSize = testSize;
1328 
1329  if (!cautiousLock()) {
1330  return;
1331  }
1332 
1333  testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());
1334  }
1335  }
1336 
1337  ~CacheLocker()
1338  {
1339  if (d && d->shm) {
1340  d->unlock();
1341  }
1342  }
1343 
1344  bool failed() const
1345  {
1346  return !d || d->shm == 0;
1347  }
1348  };
1349 
1350  QString m_cacheName;
1351  QMutex m_threadLock;
1352  SharedMemory *shm;
1353  QSharedPointer<KSDCLock> m_lock;
1354  uint m_mapSize;
1355  uint m_defaultCacheSize;
1356  uint m_expectedItemSize;
1357  SharedLockId m_expectedType;
1358 };
1359 
1360 // Must be called while the lock is already held!
1361 void SharedMemory::removeEntry(uint index)
1362 {
1363  if (index >= indexTableSize() || cacheAvail > pageTableSize()) {
1364  throw KSDCCorrupted();
1365  }
1366 
1367  PageTableEntry *pageTableEntries = pageTable();
1368  IndexTableEntry *entriesIndex = indexTable();
1369 
1370  // Update page table first
1371  pageID firstPage = entriesIndex[index].firstPage;
1372  if (firstPage < 0 || static_cast<quint32>(firstPage) >= pageTableSize()) {
1373  kDebug(ksdcArea()) << "Trying to remove an entry which is already invalid. This "
1374  << "cache is likely corrupt.";
1375  throw KSDCCorrupted();
1376  }
1377 
1378  if (index != static_cast<uint>(pageTableEntries[firstPage].index)) {
1379  kError(ksdcArea()) << "Removing entry" << index << "but the matching data"
1380  << "doesn't link back -- cache is corrupt, clearing.";
1381  throw KSDCCorrupted();
1382  }
1383 
1384  uint entriesToRemove = intCeil(entriesIndex[index].totalItemSize, cachePageSize());
1385  uint savedCacheSize = cacheAvail;
1386  for (uint i = firstPage; i < pageTableSize() &&
1387  (uint) pageTableEntries[i].index == index; ++i)
1388  {
1389  pageTableEntries[i].index = -1;
1390  cacheAvail++;
1391  }
1392 
1393  if ((cacheAvail - savedCacheSize) != entriesToRemove) {
1394  kError(ksdcArea()) << "We somehow did not remove" << entriesToRemove
1395  << "when removing entry" << index << ", instead we removed"
1396  << (cacheAvail - savedCacheSize);
1397  throw KSDCCorrupted();
1398  }
1399 
1400  // For debugging
1401 #ifndef NDEBUG
1402  void *const startOfData = page(firstPage);
1403  if (startOfData) {
1404  QByteArray str((const char *) startOfData);
1405  str.prepend(" REMOVED: ");
1406  str.prepend(QByteArray::number(index));
1407  str.prepend("ENTRY ");
1408 
1409  ::memcpy(startOfData, str.constData(), str.size() + 1);
1410  }
1411 #endif
1412 
1413  // Update the index
1414  entriesIndex[index].fileNameHash = 0;
1415  entriesIndex[index].totalItemSize = 0;
1416  entriesIndex[index].useCount = 0;
1417  entriesIndex[index].lastUsedTime = 0;
1418  entriesIndex[index].addTime = 0;
1419  entriesIndex[index].firstPage = -1;
1420 }
1421 
1422 KSharedDataCache::KSharedDataCache(const QString &cacheName,
1423  unsigned defaultCacheSize,
1424  unsigned expectedItemSize)
1425  : d(0)
1426 {
1427  try {
1428  d = new Private(cacheName, defaultCacheSize, expectedItemSize);
1429  }
1430  catch(KSDCCorrupted) {
1431  KSharedDataCache::deleteCache(cacheName);
1432 
1433  // Try only once more
1434  try {
1435  d = new Private(cacheName, defaultCacheSize, expectedItemSize);
1436  }
1437  catch(KSDCCorrupted) {
1438  kError(ksdcArea())
1439  << "Even a brand-new cache starts off corrupted, something is"
1440  << "seriously wrong. :-(";
1441  d = 0; // Just in case
1442  }
1443  }
1444 }
1445 
1446 KSharedDataCache::~KSharedDataCache()
1447 {
1448  // Note that there are no other actions required to separate from the
1449  // shared memory segment, simply unmapping is enough. This makes things
1450  // *much* easier so I'd recommend maintaining this ideal.
1451  if (!d) {
1452  return;
1453  }
1454 
1455  if (d->shm) {
1456 #ifdef KSDC_MSYNC_SUPPORTED
1457  ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
1458 #endif
1459  ::munmap(d->shm, d->m_mapSize);
1460  }
1461 
1462  // Do not delete d->shm, it was never constructed, it's just an alias.
1463  d->shm = 0;
1464 
1465  delete d;
1466 }
1467 
1468 bool KSharedDataCache::insert(const QString &key, const QByteArray &data)
1469 {
1470  try {
1471  Private::CacheLocker lock(d);
1472  if (lock.failed()) {
1473  return false;
1474  }
1475 
1476  QByteArray encodedKey = key.toUtf8();
1477  uint keyHash = generateHash(encodedKey);
1478  uint position = keyHash % d->shm->indexTableSize();
1479 
1480  // See if we're overwriting an existing entry.
1481  IndexTableEntry *indices = d->shm->indexTable();
1482 
1483  // In order to avoid the issue of a very long-lived cache having items
1484  // with a use count of 1 near-permanently, we attempt to artificially
1485  // reduce the use count of long-lived items when there is high load on
1486  // the cache. We do this randomly, with a weighting that makes the event
1487  // impossible if load < 0.5, and guaranteed if load >= 0.96.
1488  const static double startCullPoint = 0.5l;
1489  const static double mustCullPoint = 0.96l;
1490 
1491  // cacheAvail is in pages, cacheSize is in bytes.
1492  double loadFactor = 1.0 - (1.0l * d->shm->cacheAvail * d->shm->cachePageSize()
1493  / d->shm->cacheSize);
1494  bool cullCollisions = false;
1495 
1496  if (KDE_ISUNLIKELY(loadFactor >= mustCullPoint)) {
1497  cullCollisions = true;
1498  }
1499  else if (loadFactor > startCullPoint) {
1500  const int tripWireValue = RAND_MAX * (loadFactor - startCullPoint) / (mustCullPoint - startCullPoint);
1501  if (KRandom::random() >= tripWireValue) {
1502  cullCollisions = true;
1503  }
1504  }
1505 
1506  // In case of collisions in the index table (i.e. identical positions), use
1507  // quadratic chaining to attempt to find an empty slot. The equation we use
1508  // is:
1509  // position = (hash + (i + i*i) / 2) % size, where i is the probe number.
1510  uint probeNumber = 1;
1511  while (indices[position].useCount > 0 && probeNumber < MAX_PROBE_COUNT) {
1512  // If we actually stumbled upon an old version of the key we are
1513  // overwriting, then use that position, do not skip over it.
1514 
1515  if (KDE_ISUNLIKELY(indices[position].fileNameHash == keyHash)) {
1516  break;
1517  }
1518 
1519  // If we are "culling" old entries, see if this one is old and if so
1520  // reduce its use count. If it reduces to zero then eliminate it and
1521  // use its old spot.
1522 
1523  if (cullCollisions && (::time(0) - indices[position].lastUsedTime) > 60) {
1524  indices[position].useCount >>= 1;
1525  if (indices[position].useCount == 0) {
1526  kDebug(ksdcArea()) << "Overwriting existing old cached entry due to collision.";
1527  d->shm->removeEntry(position); // Remove it first
1528 
1529  break;
1530  }
1531  }
1532 
1533  position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
1534  % d->shm->indexTableSize();
1535  probeNumber++;
1536  }
1537 
1538  if (indices[position].useCount > 0 && indices[position].firstPage >= 0) {
1539  kDebug(ksdcArea()) << "Overwriting existing cached entry due to collision.";
1540  d->shm->removeEntry(position); // Remove it first
1541  }
1542 
1543  // Data will be stored as fileNamefoo\0PNGimagedata.....
1544  // So total size required is the length of the encoded file name + 1
1545  // for the trailing null, and then the length of the image data.
1546  uint fileNameLength = 1 + encodedKey.length();
1547  uint requiredSize = fileNameLength + data.size();
1548  uint pagesNeeded = intCeil(requiredSize, d->shm->cachePageSize());
1549  uint firstPage = (uint) -1;
1550 
1551  if (pagesNeeded >= d->shm->pageTableSize()) {
1552  kWarning(ksdcArea()) << key << "is too large to be cached.";
1553  return false;
1554  }
1555 
1556  // If the cache has no room, or the fragmentation is too great to find
1557  // the required number of consecutive free pages, take action.
1558  if (pagesNeeded > d->shm->cacheAvail ||
1559  (firstPage = d->shm->findEmptyPages(pagesNeeded)) >= d->shm->pageTableSize())
1560  {
1561  // If we have enough free space just defragment
1562  uint freePagesDesired = 3 * qMax(1u, pagesNeeded / 2);
1563 
1564  if (d->shm->cacheAvail > freePagesDesired) {
1565  // TODO: How the hell long does this actually take on real
1566  // caches?
1567  d->shm->defragment();
1568  firstPage = d->shm->findEmptyPages(pagesNeeded);
1569  }
1570  else {
1571  // If we already have free pages we don't want to remove a ton
1572  // extra. However we can't rely on the return value of
1573  // removeUsedPages giving us a good location since we're not
1574  // passing in the actual number of pages that we need.
1575  d->shm->removeUsedPages(qMin(2 * freePagesDesired, d->shm->pageTableSize())
1576  - d->shm->cacheAvail);
1577  firstPage = d->shm->findEmptyPages(pagesNeeded);
1578  }
1579 
1580  if (firstPage >= d->shm->pageTableSize() ||
1581  d->shm->cacheAvail < pagesNeeded)
1582  {
1583  kError(ksdcArea()) << "Unable to free up memory for" << key;
1584  return false;
1585  }
1586  }
1587 
1588  // Update page table
1589  PageTableEntry *table = d->shm->pageTable();
1590  for (uint i = 0; i < pagesNeeded; ++i) {
1591  table[firstPage + i].index = position;
1592  }
1593 
1594  // Update index
1595  indices[position].fileNameHash = keyHash;
1596  indices[position].totalItemSize = requiredSize;
1597  indices[position].useCount = 1;
1598  indices[position].addTime = ::time(0);
1599  indices[position].lastUsedTime = indices[position].addTime;
1600  indices[position].firstPage = firstPage;
1601 
1602  // Update cache
1603  d->shm->cacheAvail -= pagesNeeded;
1604 
1605  // Actually move the data in place
1606  void *dataPage = d->shm->page(firstPage);
1607  if (KDE_ISUNLIKELY(!dataPage)) {
1608  throw KSDCCorrupted();
1609  }
1610 
1611  // Verify it will all fit
1612  d->verifyProposedMemoryAccess(dataPage, requiredSize);
1613 
1614  // Cast for byte-sized pointer arithmetic
1615  uchar *startOfPageData = reinterpret_cast<uchar *>(dataPage);
1616  ::memcpy(startOfPageData, encodedKey.constData(), fileNameLength);
1617  ::memcpy(startOfPageData + fileNameLength, data.constData(), data.size());
1618 
1619  return true;
1620  }
1621  catch(KSDCCorrupted) {
1622  d->recoverCorruptedCache();
1623  return false;
1624  }
1625 }
1626 
1627 bool KSharedDataCache::find(const QString &key, QByteArray *destination) const
1628 {
1629  try {
1630  Private::CacheLocker lock(d);
1631  if (lock.failed()) {
1632  return false;
1633  }
1634 
1635  // Search in the index for our data, hashed by key;
1636  QByteArray encodedKey = key.toUtf8();
1637  qint32 entry = d->shm->findNamedEntry(encodedKey);
1638 
1639  if (entry >= 0) {
1640  const IndexTableEntry *header = &d->shm->indexTable()[entry];
1641  const void *resultPage = d->shm->page(header->firstPage);
1642  if (KDE_ISUNLIKELY(!resultPage)) {
1643  throw KSDCCorrupted();
1644  }
1645 
1646  d->verifyProposedMemoryAccess(resultPage, header->totalItemSize);
1647 
1648  header->useCount++;
1649  header->lastUsedTime = ::time(0);
1650 
1651  // Our item is the key followed immediately by the data, so skip
1652  // past the key.
1653  const char *cacheData = reinterpret_cast<const char *>(resultPage);
1654  cacheData += encodedKey.size();
1655  cacheData++; // Skip trailing null -- now we're pointing to start of data
1656 
1657  if (destination) {
1658  *destination = QByteArray(cacheData, header->totalItemSize - encodedKey.size() - 1);
1659  }
1660 
1661  return true;
1662  }
1663  }
1664  catch(KSDCCorrupted) {
1665  d->recoverCorruptedCache();
1666  }
1667 
1668  return false;
1669 }
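// Typical use of insert() and find(), sketched with made-up values (the cache
// name "myapp-previews", the key and serializedData are examples only):
//
//   KSharedDataCache cache(QLatin1String("myapp-previews"),
//                          10 * 1024 * 1024, // suggested cache size
//                          4096);            // expected average item size
//   cache.insert(QLatin1String("some/key"), serializedData);
//
//   QByteArray cached;
//   if (cache.find(QLatin1String("some/key"), &cached)) {
//       // cached now holds the same bytes that were inserted.
//   }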
1670 
1671 void KSharedDataCache::clear()
1672 {
1673  try {
1674  Private::CacheLocker lock(d);
1675 
1676  if(!lock.failed()) {
1677  d->shm->clear();
1678  }
1679  }
1680  catch(KSDCCorrupted) {
1681  d->recoverCorruptedCache();
1682  }
1683 }
1684 
1685 bool KSharedDataCache::contains(const QString &key) const
1686 {
1687  try {
1688  Private::CacheLocker lock(d);
1689  if (lock.failed()) {
1690  return false;
1691  }
1692 
1693  return d->shm->findNamedEntry(key.toUtf8()) >= 0;
1694  }
1695  catch(KSDCCorrupted) {
1696  d->recoverCorruptedCache();
1697  return false;
1698  }
1699 }
1700 
1701 void KSharedDataCache::deleteCache(const QString &cacheName)
1702 {
1703  QString cachePath = KGlobal::dirs()->locateLocal("cache", cacheName + QLatin1String(".kcache"));
1704 
1705  // Note that it is important to simply unlink the file, and not truncate it
1706 // smaller first, in order to avoid SIGBUS errors and the like with shared memory
1707  // attached to the underlying inode.
1708  kDebug(ksdcArea()) << "Removing cache at" << cachePath;
1709  QFile::remove(cachePath);
1710 }
1711 
1712 unsigned KSharedDataCache::totalSize() const
1713 {
1714  try {
1715  Private::CacheLocker lock(d);
1716  if (lock.failed()) {
1717  return 0u;
1718  }
1719 
1720  return d->shm->cacheSize;
1721  }
1722  catch(KSDCCorrupted) {
1723  d->recoverCorruptedCache();
1724  return 0u;
1725  }
1726 }
1727 
1728 unsigned KSharedDataCache::freeSize() const
1729 {
1730  try {
1731  Private::CacheLocker lock(d);
1732  if (lock.failed()) {
1733  return 0u;
1734  }
1735 
1736  return d->shm->cacheAvail * d->shm->cachePageSize();
1737  }
1738  catch(KSDCCorrupted) {
1739  d->recoverCorruptedCache();
1740  return 0u;
1741  }
1742 }
1743 
1744 KSharedDataCache::EvictionPolicy KSharedDataCache::evictionPolicy() const
1745 {
1746  if (d && d->shm) {
1747  return static_cast<EvictionPolicy>(d->shm->evictionPolicy.fetchAndAddAcquire(0));
1748  }
1749 
1750  return NoEvictionPreference;
1751 }
1752 
1753 void KSharedDataCache::setEvictionPolicy(EvictionPolicy newPolicy)
1754 {
1755  if (d && d->shm) {
1756  d->shm->evictionPolicy.fetchAndStoreRelease(static_cast<int>(newPolicy));
1757  }
1758 }
1759 
1760 unsigned KSharedDataCache::timestamp() const
1761 {
1762  if (d && d->shm) {
1763  return static_cast<unsigned>(d->shm->cacheTimestamp.fetchAndAddAcquire(0));
1764  }
1765 
1766  return 0;
1767 }
1768 
1769 void KSharedDataCache::setTimestamp(unsigned newTimestamp)
1770 {
1771  if (d && d->shm) {
1772  d->shm->cacheTimestamp.fetchAndStoreRelease(static_cast<int>(newTimestamp));
1773  }
1774 }