kshareddatacache.cpp
1 /*
2  * This file is part of the KDE project.
3  * Copyright © 2010, 2012 Michael Pyne <mpyne@kde.org>
4  * Copyright © 2012 Ralf Jung <ralfjung-e@gmx.de>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Library General Public
8  * License version 2 as published by the Free Software Foundation.
9  *
10  * This library includes "MurmurHash" code from Austin Appleby, which is
11  * placed in the public domain. See http://sites.google.com/site/murmurhash/
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Library General Public License for more details.
17  *
18  * You should have received a copy of the GNU Library General Public License
19  * along with this library; see the file COPYING.LIB. If not, write to
20  * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
21  * Boston, MA 02110-1301, USA.
22  */
23 
24 #include "kshareddatacache.h"
25 #include "kshareddatacache_p.h" // Various auxiliary support code
26 
27 #include <kdebug.h>
28 #include <kglobal.h>
29 #include <kstandarddirs.h>
30 #include <krandom.h>
31 
32 #include <QtCore/QDateTime>
33 #include <QtCore/QSharedPointer>
34 #include <QtCore/QByteArray>
35 #include <QtCore/QFile>
36 #include <QtCore/QAtomicInt>
37 #include <QtCore/QList>
38 #include <QtCore/QMutex>
39 #include <QtCore/QMutexLocker>
40 
41 #include <sys/types.h>
42 #include <sys/mman.h>
43 #include <stdlib.h>
44 
47 static const uint MAX_PROBE_COUNT = 6;
48 
49 int ksdcArea()
50 {
51  static int s_ksdcArea = KDebug::registerArea("KSharedDataCache", false);
52  return s_ksdcArea;
53 }
54 
63 class KSDCCorrupted
64 {
65  public:
66  KSDCCorrupted()
67  {
68  kError(ksdcArea()) << "Error detected in cache, re-generating";
69  }
70 };
71 
72 //-----------------------------------------------------------------------------
73 // MurmurHashAligned, by Austin Appleby
74 // (Released to the public domain, or licensed under the MIT license where
75 // software may not be released to the public domain. See
76 // http://sites.google.com/site/murmurhash/)
77 
78 // Same algorithm as MurmurHash, but only does aligned reads - should be safer
79 // on certain platforms.
80 static unsigned int MurmurHashAligned(const void *key, int len, unsigned int seed)
81 {
82  const unsigned int m = 0xc6a4a793;
83  const int r = 16;
84 
85  const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
86 
87  unsigned int h = seed ^ (len * m);
88 
89  int align = reinterpret_cast<quintptr>(data) & 3;
90 
 91  if(align && (len >= 4))
92  {
93  // Pre-load the temp registers
94 
95  unsigned int t = 0, d = 0;
96 
97  switch(align)
98  {
99  case 1: t |= data[2] << 16;
100  case 2: t |= data[1] << 8;
101  case 3: t |= data[0];
102  }
103 
104  t <<= (8 * align);
105 
106  data += 4-align;
107  len -= 4-align;
108 
109  int sl = 8 * (4-align);
110  int sr = 8 * align;
111 
112  // Mix
113 
114  while(len >= 4)
115  {
116  d = *reinterpret_cast<const unsigned int *>(data);
117  t = (t >> sr) | (d << sl);
118  h += t;
119  h *= m;
120  h ^= h >> r;
121  t = d;
122 
123  data += 4;
124  len -= 4;
125  }
126 
127  // Handle leftover data in temp registers
128 
129  int pack = len < align ? len : align;
130 
131  d = 0;
132 
133  switch(pack)
134  {
135  case 3: d |= data[2] << 16;
136  case 2: d |= data[1] << 8;
137  case 1: d |= data[0];
138  case 0: h += (t >> sr) | (d << sl);
139  h *= m;
140  h ^= h >> r;
141  }
142 
143  data += pack;
144  len -= pack;
145  }
146  else
147  {
148  while(len >= 4)
149  {
150  h += *reinterpret_cast<const unsigned int *>(data);
151  h *= m;
152  h ^= h >> r;
153 
154  data += 4;
155  len -= 4;
156  }
157  }
158 
159  //----------
160  // Handle tail bytes
161 
162  switch(len)
163  {
164  case 3: h += data[2] << 16;
165  case 2: h += data[1] << 8;
166  case 1: h += data[0];
167  h *= m;
168  h ^= h >> r;
169  };
170 
171  h *= m;
172  h ^= h >> 10;
173  h *= m;
174  h ^= h >> 17;
175 
176  return h;
177 }
178 
183 static quint32 generateHash(const QByteArray &buffer)
184 {
185  // The final constant is the "seed" for MurmurHash. Do *not* change it
186  // without incrementing the cache version.
187  return MurmurHashAligned(buffer.data(), buffer.size(), 0xF0F00F0F);
188 }
189 
190 // Alignment concerns become a big deal when we're dealing with shared memory,
191 // since trying to access a structure sized at, say, 8 bytes at an address that
192 // is not evenly divisible by 8 is a crash-inducing error on some
193 // architectures. The compiler would normally take care of this, but with
194 // shared memory the compiler will not necessarily know the alignment expected,
195 // so make sure we account for this ourselves. To do so we need a way to find
196 // out the expected alignment. Enter ALIGNOF...
197 #ifndef ALIGNOF
198 #if defined(Q_CC_GNU) || defined(Q_CC_SUN)
199 #define ALIGNOF(x) (__alignof__ (x)) // GCC provides what we want directly
200 #else
201 
202 #include <stddef.h> // offsetof
203 
204 template<class T>
205 struct __alignmentHack
206 {
207  char firstEntry;
208  T obj;
209  static const size_t size = offsetof(__alignmentHack, obj);
210 };
211 #define ALIGNOF(x) (__alignmentHack<x>::size)
212 #endif // Non gcc
213 #endif // ALIGNOF undefined
214 
215 // Returns a pointer properly aligned to handle size alignment.
216 // size should be a power of 2. start is assumed to be the lowest
217 // permissible address, therefore the return value will be >= start.
218 template<class T>
219 T* alignTo(const void *start, uint size = ALIGNOF(T))
220 {
221  quintptr mask = size - 1;
222 
223  // Cast to int-type to handle bit-twiddling
224  quintptr basePointer = reinterpret_cast<quintptr>(start);
225 
226  // If (and only if) we are already aligned, adding mask into basePointer
227  // will not increment any of the bits in ~mask and we get the right answer.
228  basePointer = (basePointer + mask) & ~mask;
229 
230  return reinterpret_cast<T *>(basePointer);
231 }
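// As a concrete (illustrative) example of the bit-twiddling above: aligning
// start = 0x1003 to an 8-byte boundary with alignTo<T>(start, 8) computes
//   mask              = 8 - 1 = 0x7
//   0x1003 + 0x7      = 0x100A
//   0x100A & ~0x7     = 0x1008   (the first 8-byte-aligned address >= 0x1003)
// while an already-aligned address such as 0x1008 is returned unchanged.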
232 
239 template<class T>
240 const T *offsetAs(const void *const base, qint32 offset)
241 {
242  const char *ptr = reinterpret_cast<const char*>(base);
243  return alignTo<const T>(ptr + offset);
244 }
245 
246 // Same as above, but for non-const objects
247 template<class T>
248 T *offsetAs(void *const base, qint32 offset)
249 {
250  char *ptr = reinterpret_cast<char *>(base);
251  return alignTo<T>(ptr + offset);
252 }
253 
259 static unsigned intCeil(unsigned a, unsigned b)
260 {
261  // The overflow check is unsigned and so is actually defined behavior.
262  if (KDE_ISUNLIKELY(b == 0 || ((a + b) < a)))
263  {
264  throw KSDCCorrupted();
265  }
266 
267  return (a + b - 1) / b;
268 }
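// For example, intCeil(10, 4) == 3, i.e. (10 + 4 - 1) / 4: ten bytes of data
// need three whole 4-byte units. The b == 0 and wraparound checks turn what
// would otherwise be silent arithmetic errors into a KSDCCorrupted throw.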
269 
273 static unsigned countSetBits(unsigned value)
274 {
275  // K&R / Wegner's algorithm used. GCC supports __builtin_popcount but we
276  // expect there to always be only 1 bit set so this should be perhaps a bit
277  // faster 99.9% of the time.
278  unsigned count = 0;
279  for (count = 0; value != 0; count++) {
280  value &= (value - 1); // Clears least-significant set bit.
281  }
282  return count;
283 }
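// For example, countSetBits(0x2000) == 1 (a valid power-of-two page size),
// while countSetBits(0x3000) == 2: the first iteration computes
// 0x3000 & 0x2FFF == 0x2000, clearing the lowest set bit, and the second
// iteration clears the remaining bit.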
284 
285 typedef qint32 pageID;
286 
287 // =========================================================================
288 // Description of the cache:
289 //
290 // The shared memory cache is designed to be handled as two separate objects,
291 // all contained in the same global memory segment. First off, there is the
292 // basic header data, consisting of the global header followed by the
293 // accounting data necessary to hold items (described in more detail
294 // momentarily). Following the accounting data is the start of the "page table"
295 // (essentially just as you'd see it in an Operating Systems text).
296 //
297 // The page table contains shared memory split into fixed-size pages, with a
298 // configurable page size. In the event that the data is too large to fit into
299 // a single logical page, it will need to occupy consecutive pages of memory.
300 //
301 // The accounting data that was referenced earlier is split into two:
302 //
303 // 1. index table, containing a fixed-size list of possible cache entries.
304 // Each index entry is of type IndexTableEntry (below), and holds the various
305 // accounting data and a pointer to the first page.
306 //
307 // 2. page table, which is used to speed up the process of searching for
308 // free pages of memory. There is one entry for every page in the page table,
309 // and it contains the index of the one entry in the index table actually
310 // holding the page (or <0 if the page is free).
311 //
312 // The entire segment looks like so:
313 // ╔════════╤═════════════╤════════════╦═══════╤═══════╤═══════╤═══════╤═══╗
314 // ║ Header │ Index Table │ Page Table ║ Pages │       │       │       │...║
315 // ╚════════╧═════════════╧════════════╩═══════╧═══════╧═══════╧═══════╧═══╝
316 // =========================================================================
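// As a concrete (illustrative) example of the layout above: a 64 KiB cache
// using 4 KiB pages has 64 KiB / 4 KiB = 16 pages in the page table and
// 16 / 2 = 8 entries in the index table, so the segment consists of the
// SharedMemory header, 8 IndexTableEntry records, 16 PageTableEntry records,
// and finally the 16 data pages. The exact byte offsets depend on the struct
// sizes and the alignment padding inserted by the accessor functions below.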
317 
318 // All elements of this struct must be "plain old data" (POD) types since it
319 // will be in shared memory. In addition, no pointers! To point to something
320 // you must use relative offsets since the pointer start addresses will be
321 // different in each process.
322 struct IndexTableEntry
323 {
324  uint fileNameHash;
325  uint totalItemSize; // in bytes
326  mutable uint useCount;
327  time_t addTime;
328  mutable time_t lastUsedTime;
329  pageID firstPage;
330 };
331 
332 // Page table entry
333 struct PageTableEntry
334 {
335  // int so we can use values <0 for unassigned pages.
336  qint32 index;
337 };
338 
339 // Each individual page contains the cached data. The first page starts off with
340 // the utf8-encoded key, a null '\0', and then the data follows immediately
341 // from the next byte, possibly crossing consecutive page boundaries to hold
342 // all of the data.
343 // There is, however, no specific struct for a page, it is simply a location in
344 // memory.
345 
346 // This is effectively the layout of the shared memory segment. The variables
347 // contained within form the header, data contained afterwards is pointed to
348 // by using special accessor functions.
349 struct SharedMemory
350 {
358  enum {
359  PIXMAP_CACHE_VERSION = 12,
360  MINIMUM_CACHE_SIZE = 4096
361  };
362 
363  // Note to those who follow me. You should not, under any circumstances, ever
364  // re-arrange the following two fields, even if you change the version number
365  // for later revisions of this code.
366  QAtomicInt ready;
367  quint8 version;
368 
369  // See kshareddatacache_p.h
370  SharedLock shmLock;
371 
372  uint cacheSize;
373  uint cacheAvail;
374  QAtomicInt evictionPolicy;
375 
376  // pageSize and cacheSize determine the number of pages. The number of
377  // pages determine the page table size and (indirectly) the index table
378  // size.
379  QAtomicInt pageSize;
380 
381  // This variable is added to reserve space for later cache timestamping
382  // support. The idea is this variable will be updated when the cache is
383  // written to, to allow clients to detect a changed cache quickly.
384  QAtomicInt cacheTimestamp;
385 
389  static unsigned equivalentPageSize(unsigned itemSize)
390  {
391  if (itemSize == 0) {
392  return 4096; // Default average item size.
393  }
394 
395  int log2OfSize = 0;
396  while ((itemSize >>= 1) != 0) {
397  log2OfSize++;
398  }
399 
400  // Bound page size between 512 bytes and 256 KiB.
401  // If this is adjusted, also alter validSizeMask in cachePageSize
402  log2OfSize = qBound(9, log2OfSize, 18);
403 
404  return (1 << log2OfSize);
405  }
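// For example, an expected item size of 100000 bytes gives log2OfSize == 16
// (since 2^16 = 65536 <= 100000 < 2^17), which is already within [9, 18],
// so 64 KiB pages are used; an expected item size of 100 bytes gives
// log2OfSize == 6, which is clamped up to 9, i.e. 512-byte pages.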
406 
407  // Returns pageSize in unsigned format.
408  unsigned cachePageSize() const
409  {
410  unsigned _pageSize = static_cast<unsigned>(pageSize);
411  // bits 9-18 may be set.
412  static const unsigned validSizeMask = 0x7FE00u;
413 
414  // Check for page sizes that are not a power-of-2, or are too low/high.
415  if (KDE_ISUNLIKELY(countSetBits(_pageSize) != 1 || (_pageSize & ~validSizeMask))) {
416  throw KSDCCorrupted();
417  }
418 
419  return _pageSize;
420  }
421 
434  bool performInitialSetup(uint _cacheSize, uint _pageSize)
435  {
436  if (_cacheSize < MINIMUM_CACHE_SIZE) {
437  kError(ksdcArea()) << "Internal error: Attempted to create a cache sized < "
438  << MINIMUM_CACHE_SIZE;
439  return false;
440  }
441 
442  if (_pageSize == 0) {
443  kError(ksdcArea()) << "Internal error: Attempted to create a cache with 0-sized pages.";
444  return false;
445  }
446 
447  shmLock.type = findBestSharedLock();
448  if (shmLock.type == LOCKTYPE_INVALID) {
449  kError(ksdcArea()) << "Unable to find an appropriate lock to guard the shared cache. "
450  << "This *should* be essentially impossible. :(";
451  return false;
452  }
453 
454  bool isProcessShared = false;
455  QSharedPointer<KSDCLock> tempLock(createLockFromId(shmLock.type, shmLock));
456 
457  if (!tempLock->initialize(isProcessShared)) {
458  kError(ksdcArea()) << "Unable to initialize the lock for the cache!";
459  return false;
460  }
461 
462  if (!isProcessShared) {
463  kWarning(ksdcArea()) << "Cache initialized, but does not support being"
464  << "shared across processes.";
465  }
466 
467  // These must be updated to make some of our auxiliary functions
468  // work right since their values will be based on the cache size.
469  cacheSize = _cacheSize;
470  pageSize = _pageSize;
471  version = PIXMAP_CACHE_VERSION;
472  cacheTimestamp = static_cast<unsigned>(::time(0));
473 
474  clearInternalTables();
475 
476  // Unlock the mini-lock, and introduce a total memory barrier to make
477  // sure all changes have propagated even without a mutex.
478  ready.ref();
479 
480  return true;
481  }
482 
483  void clearInternalTables()
484  {
485  // Assumes we're already locked somehow.
486  cacheAvail = pageTableSize();
487 
488  // Setup page tables to point nowhere
489  PageTableEntry *table = pageTable();
490  for (uint i = 0; i < pageTableSize(); ++i) {
491  table[i].index = -1;
492  }
493 
494  // Setup index tables to be accurate.
495  IndexTableEntry *indices = indexTable();
496  for (uint i = 0; i < indexTableSize(); ++i) {
497  indices[i].firstPage = -1;
498  indices[i].useCount = 0;
499  indices[i].fileNameHash = 0;
500  indices[i].totalItemSize = 0;
501  indices[i].addTime = 0;
502  indices[i].lastUsedTime = 0;
503  }
504  }
505 
506  const IndexTableEntry *indexTable() const
507  {
508  // Index Table goes immediately after this struct, at the first byte
509  // where alignment constraints are met (accounted for by offsetAs).
510  return offsetAs<IndexTableEntry>(this, sizeof(*this));
511  }
512 
513  const PageTableEntry *pageTable() const
514  {
515  const IndexTableEntry *base = indexTable();
516  base += indexTableSize();
517 
518  // Let's call wherever we end up the start of the page table...
519  return alignTo<PageTableEntry>(base);
520  }
521 
522  const void *cachePages() const
523  {
524  const PageTableEntry *tableStart = pageTable();
525  tableStart += pageTableSize();
526 
527  // Let's call wherever we end up the start of the data...
528  return alignTo<void>(tableStart, cachePageSize());
529  }
530 
531  const void *page(pageID at) const
532  {
533  if (static_cast<uint>(at) >= pageTableSize()) {
534  return 0;
535  }
536 
537  // We must manually calculate this one since pageSize varies.
538  const char *pageStart = reinterpret_cast<const char *>(cachePages());
539  pageStart += (at * cachePageSize());
540 
541  return reinterpret_cast<const void *>(pageStart);
542  }
543 
544  // The following are non-const versions of some of the methods defined
545  // above. They use const_cast<> because I feel that is better than
546  // duplicating the code. I suppose template member functions (?)
547  // may work, may investigate later.
548  IndexTableEntry *indexTable()
549  {
550  const SharedMemory *that = const_cast<const SharedMemory*>(this);
551  return const_cast<IndexTableEntry *>(that->indexTable());
552  }
553 
554  PageTableEntry *pageTable()
555  {
556  const SharedMemory *that = const_cast<const SharedMemory*>(this);
557  return const_cast<PageTableEntry *>(that->pageTable());
558  }
559 
560  void *cachePages()
561  {
562  const SharedMemory *that = const_cast<const SharedMemory*>(this);
563  return const_cast<void *>(that->cachePages());
564  }
565 
566  void *page(pageID at)
567  {
568  const SharedMemory *that = const_cast<const SharedMemory*>(this);
569  return const_cast<void *>(that->page(at));
570  }
571 
572  uint pageTableSize() const
573  {
574  return cacheSize / cachePageSize();
575  }
576 
577  uint indexTableSize() const
578  {
579  // Assume 2 pages on average are needed -> the number of entries
580  // would be half of the number of pages.
581  return pageTableSize() / 2;
582  }
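// For example, a 64 KiB cache with 4 KiB pages has pageTableSize() == 16 and
// indexTableSize() == 8, so at most 8 distinct entries can be stored,
// consistent with the assumption above that an entry spans 2 pages on
// average.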
583 
588  pageID findEmptyPages(uint pagesNeeded) const
589  {
590  if (KDE_ISUNLIKELY(pagesNeeded > pageTableSize())) {
591  return pageTableSize();
592  }
593 
594  // Loop through the page table, find the first empty page, and just
 595  // make sure that there are enough free pages.
596  const PageTableEntry *table = pageTable();
597  uint contiguousPagesFound = 0;
598  pageID base = 0;
599  for (pageID i = 0; i < static_cast<int>(pageTableSize()); ++i) {
600  if (table[i].index < 0) {
601  if (contiguousPagesFound == 0) {
602  base = i;
603  }
604  contiguousPagesFound++;
605  }
606  else {
607  contiguousPagesFound = 0;
608  }
609 
610  if (contiguousPagesFound == pagesNeeded) {
611  return base;
612  }
613  }
614 
615  return pageTableSize();
616  }
617 
618  // left < right?
619  static bool lruCompare(const IndexTableEntry &l, const IndexTableEntry &r)
620  {
621  // Ensure invalid entries migrate to the end
622  if (l.firstPage < 0 && r.firstPage >= 0) {
623  return false;
624  }
625  if (l.firstPage >= 0 && r.firstPage < 0) {
626  return true;
627  }
628 
629  // Most recently used will have the highest absolute time =>
630  // least recently used (lowest) should go first => use left < right
631  return l.lastUsedTime < r.lastUsedTime;
632  }
633 
634  // left < right?
635  static bool seldomUsedCompare(const IndexTableEntry &l, const IndexTableEntry &r)
636  {
637  // Ensure invalid entries migrate to the end
638  if (l.firstPage < 0 && r.firstPage >= 0) {
639  return false;
640  }
641  if (l.firstPage >= 0 && r.firstPage < 0) {
642  return true;
643  }
644 
645  // Put lowest use count at start by using left < right
646  return l.useCount < r.useCount;
647  }
648 
649  // left < right?
650  static bool ageCompare(const IndexTableEntry &l, const IndexTableEntry &r)
651  {
652  // Ensure invalid entries migrate to the end
653  if (l.firstPage < 0 && r.firstPage >= 0) {
654  return false;
655  }
656  if (l.firstPage >= 0 && r.firstPage < 0) {
657  return true;
658  }
659 
660  // Oldest entries die first -- they have the lowest absolute add time,
661  // so just like the others use left < right
662  return l.addTime < r.addTime;
663  }
664 
665  void defragment()
666  {
667  if (cacheAvail * cachePageSize() == cacheSize) {
668  return; // That was easy
669  }
670 
671  kDebug(ksdcArea()) << "Defragmenting the shared cache";
672 
673  // Just do a linear scan, and anytime there is free space, swap it
674  // with the pages to its right. In order to meet the precondition
675  // we need to skip any used pages first.
676 
677  pageID currentPage = 0;
678  pageID idLimit = static_cast<pageID>(pageTableSize());
679  PageTableEntry *pages = pageTable();
680 
681  if (KDE_ISUNLIKELY(!pages || idLimit <= 0)) {
682  throw KSDCCorrupted();
683  }
684 
685  // Skip used pages
686  while (currentPage < idLimit && pages[currentPage].index >= 0) {
687  ++currentPage;
688  }
689 
690  pageID freeSpot = currentPage;
691 
692  // Main loop, starting from a free page, skip to the used pages and
693  // move them back.
694  while (currentPage < idLimit) {
695  // Find the next used page
696  while (currentPage < idLimit && pages[currentPage].index < 0) {
697  ++currentPage;
698  }
699 
700  if (currentPage >= idLimit) {
701  break;
702  }
703 
704  // Found an entry, move it.
705  qint32 affectedIndex = pages[currentPage].index;
706  if (KDE_ISUNLIKELY(affectedIndex < 0 ||
707  affectedIndex >= idLimit ||
708  indexTable()[affectedIndex].firstPage != currentPage))
709  {
710  throw KSDCCorrupted();
711  }
712 
713  indexTable()[affectedIndex].firstPage = freeSpot;
714 
715  // Moving one page at a time guarantees we can use memcpy safely
716  // (in other words, the source and destination will not overlap).
717  while (currentPage < idLimit && pages[currentPage].index >= 0) {
718  const void *const sourcePage = page(currentPage);
719  void *const destinationPage = page(freeSpot);
720 
721  if(KDE_ISUNLIKELY(!sourcePage || !destinationPage)) {
722  throw KSDCCorrupted();
723  }
724 
725  ::memcpy(destinationPage, sourcePage, cachePageSize());
726  pages[freeSpot].index = affectedIndex;
727  pages[currentPage].index = -1;
728  ++currentPage;
729  ++freeSpot;
730 
731  // If we've just moved the very last page and it happened to
732  // be at the very end of the cache then we're done.
733  if (currentPage >= idLimit) {
734  break;
735  }
736 
737  // We're moving consecutive used pages whether they belong to
738  // our affected entry or not, so detect if we've started moving
739  // the data for a different entry and adjust if necessary.
740  if (affectedIndex != pages[currentPage].index) {
741  indexTable()[pages[currentPage].index].firstPage = freeSpot;
742  }
743  affectedIndex = pages[currentPage].index;
744  }
745 
746  // At this point currentPage is on a page that is unused, and the
747  // cycle repeats. However, currentPage is not the first unused
748  // page, freeSpot is, so leave it alone.
749  }
750  }
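// For example, if the page table holds entries A, B and C laid out as
// [A, free, free, B, B, free, C], the scan above leaves A where it is,
// slides B's two pages left into slots 1-2 (updating B's firstPage), then
// slides C into slot 3, ending with [A, B, B, C, free, free, free] and all
// free pages contiguous at the end.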
751 
758  qint32 findNamedEntry(const QByteArray &key) const
759  {
760  uint keyHash = generateHash(key);
761  uint position = keyHash % indexTableSize();
762  uint probeNumber = 1; // See insert() for description
763 
764  // Imagine 3 entries A, B, C in this logical probing chain. If B is
765  // later removed then we can't find C either. So, we must keep
766  // searching for probeNumber number of tries (or we find the item,
767  // obviously).
768  while (indexTable()[position].fileNameHash != keyHash &&
769  probeNumber < MAX_PROBE_COUNT)
770  {
771  position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
772  % indexTableSize();
773  probeNumber++;
774  }
775 
776  if (indexTable()[position].fileNameHash == keyHash) {
777  pageID firstPage = indexTable()[position].firstPage;
778  if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) {
779  return -1;
780  }
781 
782  const void *resultPage = page(firstPage);
783  if (KDE_ISUNLIKELY(!resultPage)) {
784  throw KSDCCorrupted();
785  }
786 
787  const char *utf8FileName = reinterpret_cast<const char *>(resultPage);
788  if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) {
789  return position;
790  }
791  }
792 
793  return -1; // Not found, or a different one found.
794  }
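// The loop above is triangular-number ("quadratic") probing: the candidate
// slots after the initial hash position are offset by (i + i*i)/2 for
// i = 1..5, i.e. hash+1, hash+3, hash+6, hash+10, hash+15 (mod
// indexTableSize()). With MAX_PROBE_COUNT == 6 a lookup therefore inspects
// at most 6 slots before giving up, matching the probing done in
// KSharedDataCache::insert() below.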
795 
796  // Function to use with QSharedPointer in removeUsedPages below...
797  static void deleteTable(IndexTableEntry *table) {
798  delete [] table;
799  }
800 
811  uint removeUsedPages(uint numberNeeded)
812  {
813  if (numberNeeded == 0) {
814  kError(ksdcArea()) << "Internal error: Asked to remove exactly 0 pages for some reason.";
815  throw KSDCCorrupted();
816  }
817 
818  if (numberNeeded > pageTableSize()) {
819  kError(ksdcArea()) << "Internal error: Requested more space than exists in the cache.";
820  kError(ksdcArea()) << numberNeeded << "requested, " << pageTableSize() << "is the total possible.";
821  throw KSDCCorrupted();
822  }
823 
824  // If the cache free space is large enough we will defragment first
825  // instead since it's likely we're highly fragmented.
826  // Otherwise, we will (eventually) simply remove entries per the
827  // eviction order set for the cache until there is enough room
828  // available to hold the number of pages we need.
829 
830  kDebug(ksdcArea()) << "Removing old entries to free up" << numberNeeded << "pages,"
831  << cacheAvail << "are already theoretically available.";
832 
833  if (cacheAvail > 3 * numberNeeded) {
834  defragment();
835  uint result = findEmptyPages(numberNeeded);
836 
837  if (result < pageTableSize()) {
838  return result;
839  }
840  else {
841  kError(ksdcArea()) << "Just defragmented a locked cache, but still there"
842  << "isn't enough room for the current request.";
843  }
844  }
845 
846  // At this point we know we'll have to free some space up, so sort our
847  // list of entries by whatever the current criteria are and start
848  // killing expired entries.
849  QSharedPointer<IndexTableEntry> tablePtr(new IndexTableEntry[indexTableSize()], deleteTable);
850 
851  if (!tablePtr) {
852  kError(ksdcArea()) << "Unable to allocate temporary memory for sorting the cache!";
853  clearInternalTables();
854  throw KSDCCorrupted();
855  }
856 
857  // We use tablePtr to ensure the data is destroyed, but do the access
858  // via a helper pointer to allow for array ops.
859  IndexTableEntry *table = tablePtr.data();
860 
861  ::memcpy(table, indexTable(), sizeof(IndexTableEntry) * indexTableSize());
862 
863  // Our entry ID is simply its index into the
864  // index table, which qSort will rearrange all willy-nilly, so first
865  // we'll save the *real* entry ID into firstPage (which is useless in
866  // our copy of the index table). On the other hand if the entry is not
867  // used then we note that with -1.
868  for (uint i = 0; i < indexTableSize(); ++i) {
869  table[i].firstPage = table[i].useCount > 0 ? static_cast<pageID>(i)
870  : -1;
871  }
872 
873  // Declare the comparison function that we'll use to pass to qSort,
874  // based on our cache eviction policy.
875  bool (*compareFunction)(const IndexTableEntry &, const IndexTableEntry &);
876  switch((int) evictionPolicy) {
877  case (int) KSharedDataCache::EvictLeastOftenUsed:
878  case (int) KSharedDataCache::NoEvictionPreference:
879  default:
880  compareFunction = seldomUsedCompare;
881  break;
882 
883  case (int) KSharedDataCache::EvictLeastRecentlyUsed:
884  compareFunction = lruCompare;
885  break;
886 
887  case (int) KSharedDataCache::EvictOldest:
888  compareFunction = ageCompare;
889  break;
890  }
891 
892  qSort(table, table + indexTableSize(), compareFunction);
893 
894  // Least recently used entries will be in the front.
895  // Start killing until we have room.
896 
897  // Note on removeEntry: It expects an index into the index table,
898  // but our sorted list is all jumbled. But we stored the real index
899  // in the firstPage member.
900  // Remove entries until we've removed at least the required number
901  // of pages.
902  uint i = 0;
903  while (i < indexTableSize() && numberNeeded > cacheAvail) {
904  int curIndex = table[i++].firstPage; // Really an index, not a page
905 
906  // Removed everything, still no luck (or curIndex is set but too high).
907  if (curIndex < 0 || static_cast<uint>(curIndex) >= indexTableSize()) {
908  kError(ksdcArea()) << "Trying to remove index" << curIndex
909  << "out-of-bounds for index table of size" << indexTableSize();
910  throw KSDCCorrupted();
911  }
912 
913  kDebug(ksdcArea()) << "Removing entry of" << indexTable()[curIndex].totalItemSize
914  << "size";
915  removeEntry(curIndex);
916  }
917 
918  // At this point let's see if we have freed up enough data by
919  // defragmenting first and seeing if we can find that free space.
920  defragment();
921 
922  pageID result = pageTableSize();
923  while (i < indexTableSize() &&
924  (static_cast<uint>(result = findEmptyPages(numberNeeded))) >= pageTableSize())
925  {
926  int curIndex = table[i++].firstPage;
927 
928  if (curIndex < 0) {
929  // One last shot.
930  defragment();
931  return findEmptyPages(numberNeeded);
932  }
933 
934  if (KDE_ISUNLIKELY(static_cast<uint>(curIndex) >= indexTableSize())) {
935  throw KSDCCorrupted();
936  }
937 
938  removeEntry(curIndex);
939  }
940 
941  // Whew.
942  return result;
943  }
944 
945  // Returns the total size required for a given cache size.
946  static uint totalSize(uint cacheSize, uint effectivePageSize)
947  {
948  uint numberPages = intCeil(cacheSize, effectivePageSize);
949  uint indexTableSize = numberPages / 2;
950 
951  // Knowing the number of pages, we can determine what addresses we'd be
952  // using (properly aligned), and from there determine how much memory
953  // we'd use.
954  IndexTableEntry *indexTableStart =
955  offsetAs<IndexTableEntry>(static_cast<void*>(0), sizeof (SharedMemory));
956 
957  indexTableStart += indexTableSize;
958 
959  PageTableEntry *pageTableStart = reinterpret_cast<PageTableEntry *>(indexTableStart);
960  pageTableStart = alignTo<PageTableEntry>(pageTableStart);
961  pageTableStart += numberPages;
962 
963  // The weird part, we must manually adjust the pointer based on the page size.
964  char *cacheStart = reinterpret_cast<char *>(pageTableStart);
965  cacheStart += (numberPages * effectivePageSize);
966 
967  // ALIGNOF gives pointer alignment
968  cacheStart = alignTo<char>(cacheStart, ALIGNOF(void*));
969 
970  // We've traversed the header, index, page table, and cache.
971  // Wherever we're at now is the size of the enchilada.
972  return static_cast<uint>(reinterpret_cast<quintptr>(cacheStart));
973  }
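// In effect totalSize() dry-runs the accessor functions above starting from
// address 0. For the illustrative 64 KiB / 4 KiB configuration used earlier,
// that is sizeof(SharedMemory), plus 8 index entries, plus 16 page table
// entries, plus alignment padding, plus 64 KiB of page data, which is why the
// mapping created in mapSharedMemory() is always somewhat larger than the
// requested cacheSize.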
974 
975  uint fileNameHash(const QByteArray &utf8FileName) const
976  {
977  return generateHash(utf8FileName) % indexTableSize();
978  }
979 
980  void clear()
981  {
982  clearInternalTables();
983  }
984 
985  void removeEntry(uint index);
986 };
987 
988 // The per-instance private data, such as map size, whether
989 // attached or not, pointer to shared memory, etc.
990 class KSharedDataCache::Private
991 {
992  public:
993  Private(const QString &name,
994  unsigned defaultCacheSize,
995  unsigned expectedItemSize
996  )
997  : m_cacheName(name)
998  , shm(0)
999  , m_lock(0)
1000  , m_mapSize(0)
1001  , m_defaultCacheSize(defaultCacheSize)
1002  , m_expectedItemSize(expectedItemSize)
1003  , m_expectedType(LOCKTYPE_INVALID)
1004  {
1005  mapSharedMemory();
1006  }
1007 
1008  // Put the cache in a condition to be able to call mapSharedMemory() by
1009  // completely detaching from shared memory (such as to respond to an
1010  // unrecoverable error).
1011  // m_mapSize must already be set to the amount of memory mapped to shm.
1012  void detachFromSharedMemory()
1013  {
1014  // The lock holds a reference into shared memory, so this must be
1015  // cleared before shm is removed.
1016  m_lock.clear();
1017 
1018  if (shm && 0 != ::munmap(shm, m_mapSize)) {
1019  kError(ksdcArea()) << "Unable to unmap shared memory segment"
1020  << static_cast<void*>(shm) << ":" << ::strerror(errno);
1021  }
1022 
1023  shm = 0;
1024  m_mapSize = 0;
1025  }
1026 
1027  // This function does a lot of the important work, attempting to connect to shared
1028  // memory, a private anonymous mapping if that fails, and failing that, nothing (but
1029  // the cache remains "valid", we just don't actually do anything).
1030  void mapSharedMemory()
1031  {
1032  // 0-sized caches are fairly useless.
1033  unsigned cacheSize = qMax(m_defaultCacheSize, uint(SharedMemory::MINIMUM_CACHE_SIZE));
1034  unsigned pageSize = SharedMemory::equivalentPageSize(m_expectedItemSize);
1035 
1036  // Ensure that the cache is sized such that there is a minimum number of
1037  // pages available. (i.e. a cache consisting of only 1 page is fairly
1038  // useless and probably crash-prone).
1039  cacheSize = qMax(pageSize * 256, cacheSize);
1040 
1041  // The m_cacheName is used to find the file to store the cache in.
1042  QString cacheName = KGlobal::dirs()->locateLocal("cache", m_cacheName + QLatin1String(".kcache"));
1043  QFile file(cacheName);
1044 
1045  // The basic idea is to open the file that we want to map into shared
1046  // memory, and then actually establish the mapping. Once we have mapped the
1047  // file into shared memory we can close the file handle, the mapping will
1048  // still be maintained (unless the file is resized to be shorter than
1049  // expected, which we don't handle yet :-( )
1050 
1051  // size accounts for the overhead over the desired cacheSize
1052  uint size = SharedMemory::totalSize(cacheSize, pageSize);
1053  void *mapAddress = MAP_FAILED;
1054 
1055  if (size < cacheSize) {
1056  kError(ksdcArea()) << "Asked for a cache size less than requested size somehow -- Logic Error :(";
1057  return;
1058  }
1059 
1060  // We establish the shared memory mapping here, only if we will have appropriate
1061  // mutex support (systemSupportsProcessSharing), then we:
1062  // Open the file and resize to some sane value if the file is too small.
1063  if (file.open(QIODevice::ReadWrite) &&
1064  (file.size() >= size ||
1065  (file.resize(size) && ensureFileAllocated(file.handle(), size))))
1066  {
1067  // Use mmap directly instead of QFile::map since the QFile (and its
1068  // shared mapping) will disappear unless we hang onto the QFile for no
1069  // reason (see the note below, we don't care about the file per se...)
1070  mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
1071 
1072  // So... it is possible that someone else has mapped this cache already
1073  // with a larger size. If that's the case we need to at least match
1074  // the size to be able to access every entry, so fixup the mapping.
1075  if (mapAddress != MAP_FAILED) {
1076  SharedMemory *mapped = reinterpret_cast<SharedMemory *>(mapAddress);
1077 
1078  // First make sure that the version of the cache on disk is
1079  // valid. We also need to check that version != 0 to
1080  // disambiguate against an uninitialized cache.
1081  if (mapped->version != SharedMemory::PIXMAP_CACHE_VERSION &&
1082  mapped->version > 0)
1083  {
1084  kWarning(ksdcArea()) << "Deleting wrong version of cache" << cacheName;
1085 
1086  // CAUTION: Potentially recursive since the recovery
1087  // involves calling this function again.
1088  m_mapSize = size;
1089  shm = mapped;
1090  recoverCorruptedCache();
1091  return;
1092  }
1093  else if (mapped->cacheSize > cacheSize) {
1094  // This order is very important. We must save the cache size
1095  // before we remove the mapping, but unmap before overwriting
1096  // the previous mapping size...
1097  cacheSize = mapped->cacheSize;
1098  unsigned actualPageSize = mapped->cachePageSize();
1099  ::munmap(mapAddress, size);
1100  size = SharedMemory::totalSize(cacheSize, actualPageSize);
1101  mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
1102  }
1103  }
1104  }
1105 
1106  // We could be here without the mapping established if:
1107  // 1) Process-shared synchronization is not supported, either at compile or run time,
1108  // 2) Unable to open the required file.
1109  // 3) Unable to resize the file to be large enough.
1110  // 4) Establishing the mapping failed.
1111  // 5) The mapping succeeded, but the size was wrong and we were unable to map when
1112  // we tried again.
1113  // 6) The incorrect version of the cache was detected.
1114  // 7) The file could be created, but posix_fallocate failed to commit it fully to disk.
 1115  // In any of these cases, attempt to fall back to the
1116  // better-supported anonymous private page style of mmap. This memory won't
1117  // be shared, but our code will still work the same.
1118  // NOTE: We never use the on-disk representation independently of the
1119  // shared memory. If we don't get shared memory the disk info is ignored,
1120  // if we do get shared memory we never look at disk again.
1121  if (mapAddress == MAP_FAILED) {
1122  kWarning(ksdcArea()) << "Failed to establish shared memory mapping, will fallback"
1123  << "to private memory -- memory usage will increase";
1124 
1125  mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1126  }
1127 
1128  // Well now we're really hosed. We can still work, but we can't even cache
1129  // data.
1130  if (mapAddress == MAP_FAILED) {
1131  kError(ksdcArea()) << "Unable to allocate shared memory segment for shared data cache"
1132  << cacheName << "of size" << cacheSize;
1133  return;
1134  }
1135 
1136  m_mapSize = size;
1137 
1138  // We never actually construct shm, but we assign it the same address as the
1139  // shared memory we just mapped, so effectively shm is now a SharedMemory that
1140  // happens to be located at mapAddress.
1141  shm = reinterpret_cast<SharedMemory *>(mapAddress);
1142 
1143  // If we were first to create this memory map, all data will be 0.
1144  // Therefore if ready == 0 we're not initialized. A fully initialized
1145  // header will have ready == 2. Why?
1146  // Because 0 means "safe to initialize"
1147  // 1 means "in progress of initing"
1148  // 2 means "ready"
1149  uint usecSleepTime = 8; // Start by sleeping for 8 microseconds
1150  while (shm->ready != 2) {
1151  if (KDE_ISUNLIKELY(usecSleepTime >= (1 << 21))) {
 1152  // Didn't acquire within ~2 seconds? Assume an issue exists
1153  kError(ksdcArea()) << "Unable to acquire shared lock, is the cache corrupt?";
1154 
1155  file.remove(); // Unlink the cache in case it's corrupt.
1156  detachFromSharedMemory();
1157  return; // Fallback to QCache (later)
1158  }
1159 
1160  if (shm->ready.testAndSetAcquire(0, 1)) {
1161  if (!shm->performInitialSetup(cacheSize, pageSize)) {
1162  kError(ksdcArea()) << "Unable to perform initial setup, this system probably "
1163  "does not really support process-shared pthreads or "
1164  "semaphores, even though it claims otherwise.";
1165 
1166  file.remove();
1167  detachFromSharedMemory();
1168  return;
1169  }
1170  }
1171  else {
1172  usleep(usecSleepTime); // spin
1173 
1174  // Exponential fallback as in Ethernet and similar collision resolution methods
1175  usecSleepTime *= 2;
1176  }
1177  }
1178 
1179  m_expectedType = shm->shmLock.type;
1180  m_lock = QSharedPointer<KSDCLock>(createLockFromId(m_expectedType, shm->shmLock));
1181  bool isProcessSharingSupported = false;
1182 
1183  if (!m_lock->initialize(isProcessSharingSupported)) {
1184  kError(ksdcArea()) << "Unable to setup shared cache lock, although it worked when created.";
1185  detachFromSharedMemory();
1186  }
1187  }
1188 
1189  // Called whenever the cache is apparently corrupt (for instance, a timeout trying to
1190  // lock the cache). In this situation it is safer just to destroy it all and try again.
1191  void recoverCorruptedCache()
1192  {
1193  KSharedDataCache::deleteCache(m_cacheName);
1194 
1195  detachFromSharedMemory();
1196 
1197  // Do this even if we weren't previously cached -- it might work now.
1198  mapSharedMemory();
1199  }
1200 
1201  // This should be called for any memory access to shared memory. This
1202  // function will verify that the bytes [base, base+accessLength) are
1203  // actually mapped to d->shm. The cache itself may have incorrect cache
1204  // page sizes, incorrect cache size, etc. so this function should be called
1205  // despite the cache data indicating it should be safe.
1206  //
1207  // If the access is /not/ safe then a KSDCCorrupted exception will be
1208  // thrown, so be ready to catch that.
1209  void verifyProposedMemoryAccess(const void *base, unsigned accessLength) const
1210  {
1211  quintptr startOfAccess = reinterpret_cast<quintptr>(base);
1212  quintptr startOfShm = reinterpret_cast<quintptr>(shm);
1213 
1214  if (KDE_ISUNLIKELY(startOfAccess < startOfShm)) {
1215  throw KSDCCorrupted();
1216  }
1217 
1218  quintptr endOfShm = startOfShm + m_mapSize;
1219  quintptr endOfAccess = startOfAccess + accessLength;
1220 
1221  // Check for unsigned integer wraparound, and then
1222  // bounds access
1223  if (KDE_ISUNLIKELY((endOfShm < startOfShm) ||
1224  (endOfAccess < startOfAccess) ||
1225  (endOfAccess > endOfShm)))
1226  {
1227  throw KSDCCorrupted();
1228  }
1229  }
1230 
1231  bool lock() const
1232  {
1233  if (KDE_ISLIKELY(shm && shm->shmLock.type == m_expectedType)) {
1234  return m_lock->lock();
1235  }
1236 
1237  // No shm or wrong type --> corrupt!
1238  throw KSDCCorrupted();
1239  }
1240 
1241  void unlock() const
1242  {
1243  m_lock->unlock();
1244  }
1245 
1246  class CacheLocker
1247  {
1248  mutable Private * d;
1249 
1250  bool cautiousLock()
1251  {
1252  int lockCount = 0;
1253 
1254  // Locking can fail due to a timeout. If it happens too often even though
1255  // we're taking corrective action assume there's some disastrous problem
1256  // and give up.
1257  while (!d->lock()) {
1258  d->recoverCorruptedCache();
1259 
1260  if (!d->shm) {
1261  kWarning(ksdcArea()) << "Lost the connection to shared memory for cache"
1262  << d->m_cacheName;
1263  return false;
1264  }
1265 
1266  if (lockCount++ > 4) {
1267  kError(ksdcArea()) << "There is a very serious problem with the KDE data cache"
1268  << d->m_cacheName << "giving up trying to access cache.";
1269  d->detachFromSharedMemory();
1270  return false;
1271  }
1272  }
1273 
1274  return true;
1275  }
1276 
1277  public:
1278  CacheLocker(const Private *_d) : d(const_cast<Private *>(_d))
1279  {
1280  if (KDE_ISUNLIKELY(!d || !d->shm || !cautiousLock())) {
1281  return;
1282  }
1283 
1284  uint testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());
1285 
1286  // A while loop? Indeed, think what happens if this happens
1287  // twice -- hard to debug race conditions.
1288  while (testSize > d->m_mapSize) {
1289  kDebug(ksdcArea()) << "Someone enlarged the cache on us,"
1290  << "attempting to match new configuration.";
1291 
1292  // Protect against two threads accessing this same KSDC
1293  // from trying to execute the following remapping at the
1294  // same time.
1295  QMutexLocker d_locker(&d->m_threadLock);
1296  if (testSize == d->m_mapSize) {
1297  break; // Bail if the other thread already solved.
1298  }
1299 
1300  // Linux supports mremap, but it's not portable. So,
1301  // drop the map and (try to) re-establish.
1302  d->unlock();
1303 
1304 #ifdef KSDC_MSYNC_SUPPORTED
1305  ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
1306 #endif
1307  ::munmap(d->shm, d->m_mapSize);
1308  d->m_mapSize = 0;
1309  d->shm = 0;
1310 
1311  QFile f(d->m_cacheName);
1312  if (!f.open(QFile::ReadWrite)) {
1313  kError(ksdcArea()) << "Unable to re-open cache, unfortunately"
1314  << "the connection had to be dropped for"
1315  << "crash safety -- things will be much"
1316  << "slower now.";
1317  return;
1318  }
1319 
1320  void *newMap = ::mmap(0, testSize, PROT_READ | PROT_WRITE,
1321  MAP_SHARED, f.handle(), 0);
1322  if (newMap == MAP_FAILED) {
 1323  kError(ksdcArea()) << "Unable to re-map the cache into memory,"
1324  << "things will be much slower now";
1325  return;
1326  }
1327 
1328  d->shm = reinterpret_cast<SharedMemory *>(newMap);
1329  d->m_mapSize = testSize;
1330 
1331  if (!cautiousLock()) {
1332  return;
1333  }
1334 
1335  testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());
1336  }
1337  }
1338 
1339  ~CacheLocker()
1340  {
1341  if (d && d->shm) {
1342  d->unlock();
1343  }
1344  }
1345 
1346  bool failed() const
1347  {
1348  return !d || d->shm == 0;
1349  }
1350  };
1351 
1352  QString m_cacheName;
1353  QMutex m_threadLock;
1354  SharedMemory *shm;
1355  QSharedPointer<KSDCLock> m_lock;
1356  uint m_mapSize;
1357  uint m_defaultCacheSize;
1358  uint m_expectedItemSize;
1359  SharedLockId m_expectedType;
1360 };
1361 
1362 // Must be called while the lock is already held!
1363 void SharedMemory::removeEntry(uint index)
1364 {
1365  if (index >= indexTableSize() || cacheAvail > pageTableSize()) {
1366  throw KSDCCorrupted();
1367  }
1368 
1369  PageTableEntry *pageTableEntries = pageTable();
1370  IndexTableEntry *entriesIndex = indexTable();
1371 
1372  // Update page table first
1373  pageID firstPage = entriesIndex[index].firstPage;
1374  if (firstPage < 0 || static_cast<quint32>(firstPage) >= pageTableSize()) {
1375  kDebug(ksdcArea()) << "Trying to remove an entry which is already invalid. This "
1376  << "cache is likely corrupt.";
1377  throw KSDCCorrupted();
1378  }
1379 
1380  if (index != static_cast<uint>(pageTableEntries[firstPage].index)) {
1381  kError(ksdcArea()) << "Removing entry" << index << "but the matching data"
1382  << "doesn't link back -- cache is corrupt, clearing.";
1383  throw KSDCCorrupted();
1384  }
1385 
1386  uint entriesToRemove = intCeil(entriesIndex[index].totalItemSize, cachePageSize());
1387  uint savedCacheSize = cacheAvail;
1388  for (uint i = firstPage; i < pageTableSize() &&
1389  (uint) pageTableEntries[i].index == index; ++i)
1390  {
1391  pageTableEntries[i].index = -1;
1392  cacheAvail++;
1393  }
1394 
1395  if ((cacheAvail - savedCacheSize) != entriesToRemove) {
1396  kError(ksdcArea()) << "We somehow did not remove" << entriesToRemove
1397  << "when removing entry" << index << ", instead we removed"
1398  << (cacheAvail - savedCacheSize);
1399  throw KSDCCorrupted();
1400  }
1401 
1402  // For debugging
1403 #ifndef NDEBUG
1404  void *const startOfData = page(firstPage);
1405  if (startOfData) {
1406  QByteArray str((const char *) startOfData);
1407  str.prepend(" REMOVED: ");
1408  str.prepend(QByteArray::number(index));
1409  str.prepend("ENTRY ");
1410 
1411  ::memcpy(startOfData, str.constData(), str.size() + 1);
1412  }
1413 #endif
1414 
1415  // Update the index
1416  entriesIndex[index].fileNameHash = 0;
1417  entriesIndex[index].totalItemSize = 0;
1418  entriesIndex[index].useCount = 0;
1419  entriesIndex[index].lastUsedTime = 0;
1420  entriesIndex[index].addTime = 0;
1421  entriesIndex[index].firstPage = -1;
1422 }
1423 
1424 KSharedDataCache::KSharedDataCache(const QString &cacheName,
1425  unsigned defaultCacheSize,
1426  unsigned expectedItemSize)
1427  : d(0)
1428 {
1429  try {
1430  d = new Private(cacheName, defaultCacheSize, expectedItemSize);
1431  }
1432  catch(KSDCCorrupted) {
1433  KSharedDataCache::deleteCache(cacheName);
1434 
1435  // Try only once more
1436  try {
1437  d = new Private(cacheName, defaultCacheSize, expectedItemSize);
1438  }
1439  catch(KSDCCorrupted) {
1440  kError(ksdcArea())
1441  << "Even a brand-new cache starts off corrupted, something is"
1442  << "seriously wrong. :-(";
1443  d = 0; // Just in case
1444  }
1445  }
1446 }
1447 
1448 KSharedDataCache::~KSharedDataCache()
1449 {
 1450  // Note that there are no other actions required to separate from the
1451  // shared memory segment, simply unmapping is enough. This makes things
1452  // *much* easier so I'd recommend maintaining this ideal.
1453  if (!d) {
1454  return;
1455  }
1456 
1457  if (d->shm) {
1458 #ifdef KSDC_MSYNC_SUPPORTED
1459  ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
1460 #endif
1461  ::munmap(d->shm, d->m_mapSize);
1462  }
1463 
1464  // Do not delete d->shm, it was never constructed, it's just an alias.
1465  d->shm = 0;
1466 
1467  delete d;
1468 }
1469 
1470 bool KSharedDataCache::insert(const QString &key, const QByteArray &data)
1471 {
1472  try {
1473  Private::CacheLocker lock(d);
1474  if (lock.failed()) {
1475  return false;
1476  }
1477 
1478  QByteArray encodedKey = key.toUtf8();
1479  uint keyHash = generateHash(encodedKey);
1480  uint position = keyHash % d->shm->indexTableSize();
1481 
1482  // See if we're overwriting an existing entry.
1483  IndexTableEntry *indices = d->shm->indexTable();
1484 
1485  // In order to avoid the issue of a very long-lived cache having items
 1486  // with a use count of 1 near-permanently, we attempt to artificially
1487  // reduce the use count of long-lived items when there is high load on
1488  // the cache. We do this randomly, with a weighting that makes the event
1489  // impossible if load < 0.5, and guaranteed if load >= 0.96.
1490  const static double startCullPoint = 0.5l;
1491  const static double mustCullPoint = 0.96l;
1492 
1493  // cacheAvail is in pages, cacheSize is in bytes.
1494  double loadFactor = 1.0 - (1.0l * d->shm->cacheAvail * d->shm->cachePageSize()
1495  / d->shm->cacheSize);
1496  bool cullCollisions = false;
1497 
1498  if (KDE_ISUNLIKELY(loadFactor >= mustCullPoint)) {
1499  cullCollisions = true;
1500  }
1501  else if (loadFactor > startCullPoint) {
1502  const int tripWireValue = RAND_MAX * (loadFactor - startCullPoint) / (mustCullPoint - startCullPoint);
1503  if (KRandom::random() >= tripWireValue) {
1504  cullCollisions = true;
1505  }
1506  }
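 // For example, with the constants above a load factor of 0.73 gives a
 // trip-wire of RAND_MAX * (0.73 - 0.5) / (0.96 - 0.5), i.e. half of
 // RAND_MAX, so a colliding stale entry has roughly a 50% chance of having
 // its use count halved below; at loads of 0.5 or less culling never
 // happens, and at 0.96 or more it always does.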
1507 
1508  // In case of collisions in the index table (i.e. identical positions), use
1509  // quadratic chaining to attempt to find an empty slot. The equation we use
1510  // is:
1511  // position = (hash + (i + i*i) / 2) % size, where i is the probe number.
1512  uint probeNumber = 1;
1513  while (indices[position].useCount > 0 && probeNumber < MAX_PROBE_COUNT) {
1514  // If we actually stumbled upon an old version of the key we are
1515  // overwriting, then use that position, do not skip over it.
1516 
1517  if (KDE_ISUNLIKELY(indices[position].fileNameHash == keyHash)) {
1518  break;
1519  }
1520 
1521  // If we are "culling" old entries, see if this one is old and if so
1522  // reduce its use count. If it reduces to zero then eliminate it and
1523  // use its old spot.
1524 
1525  if (cullCollisions && (::time(0) - indices[position].lastUsedTime) > 60) {
1526  indices[position].useCount >>= 1;
1527  if (indices[position].useCount == 0) {
1528  kDebug(ksdcArea()) << "Overwriting existing old cached entry due to collision.";
1529  d->shm->removeEntry(position); // Remove it first
1530 
1531  break;
1532  }
1533  }
1534 
1535  position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
1536  % d->shm->indexTableSize();
1537  probeNumber++;
1538  }
1539 
1540  if (indices[position].useCount > 0 && indices[position].firstPage >= 0) {
1541  kDebug(ksdcArea()) << "Overwriting existing cached entry due to collision.";
1542  d->shm->removeEntry(position); // Remove it first
1543  }
1544 
1545  // Data will be stored as fileNamefoo\0PNGimagedata.....
1546  // So total size required is the length of the encoded file name + 1
1547  // for the trailing null, and then the length of the image data.
1548  uint fileNameLength = 1 + encodedKey.length();
1549  uint requiredSize = fileNameLength + data.size();
1550  uint pagesNeeded = intCeil(requiredSize, d->shm->cachePageSize());
1551  uint firstPage = (uint) -1;
1552 
1553  if (pagesNeeded >= d->shm->pageTableSize()) {
1554  kWarning(ksdcArea()) << key << "is too large to be cached.";
1555  return false;
1556  }
1557 
1558  // If the cache has no room, or the fragmentation is too great to find
1559  // the required number of consecutive free pages, take action.
1560  if (pagesNeeded > d->shm->cacheAvail ||
1561  (firstPage = d->shm->findEmptyPages(pagesNeeded)) >= d->shm->pageTableSize())
1562  {
1563  // If we have enough free space just defragment
1564  uint freePagesDesired = 3 * qMax(1u, pagesNeeded / 2);
1565 
1566  if (d->shm->cacheAvail > freePagesDesired) {
1567  // TODO: How the hell long does this actually take on real
1568  // caches?
1569  d->shm->defragment();
1570  firstPage = d->shm->findEmptyPages(pagesNeeded);
1571  }
1572  else {
 1573  // If we already have free pages we don't want to remove a ton of
 1574  // extra ones. However, we can't rely on the return value of
1575  // removeUsedPages giving us a good location since we're not
1576  // passing in the actual number of pages that we need.
1577  d->shm->removeUsedPages(qMin(2 * freePagesDesired, d->shm->pageTableSize())
1578  - d->shm->cacheAvail);
1579  firstPage = d->shm->findEmptyPages(pagesNeeded);
1580  }
1581 
1582  if (firstPage >= d->shm->pageTableSize() ||
1583  d->shm->cacheAvail < pagesNeeded)
1584  {
1585  kError(ksdcArea()) << "Unable to free up memory for" << key;
1586  return false;
1587  }
1588  }
1589 
1590  // Update page table
1591  PageTableEntry *table = d->shm->pageTable();
1592  for (uint i = 0; i < pagesNeeded; ++i) {
1593  table[firstPage + i].index = position;
1594  }
1595 
1596  // Update index
1597  indices[position].fileNameHash = keyHash;
1598  indices[position].totalItemSize = requiredSize;
1599  indices[position].useCount = 1;
1600  indices[position].addTime = ::time(0);
1601  indices[position].lastUsedTime = indices[position].addTime;
1602  indices[position].firstPage = firstPage;
1603 
1604  // Update cache
1605  d->shm->cacheAvail -= pagesNeeded;
1606 
1607  // Actually move the data in place
1608  void *dataPage = d->shm->page(firstPage);
1609  if (KDE_ISUNLIKELY(!dataPage)) {
1610  throw KSDCCorrupted();
1611  }
1612 
1613  // Verify it will all fit
1614  d->verifyProposedMemoryAccess(dataPage, requiredSize);
1615 
1616  // Cast for byte-sized pointer arithmetic
1617  uchar *startOfPageData = reinterpret_cast<uchar *>(dataPage);
1618  ::memcpy(startOfPageData, encodedKey.constData(), fileNameLength);
1619  ::memcpy(startOfPageData + fileNameLength, data.constData(), data.size());
1620 
1621  return true;
1622  }
1623  catch(KSDCCorrupted) {
1624  d->recoverCorruptedCache();
1625  return false;
1626  }
1627 }
1628 
1629 bool KSharedDataCache::find(const QString &key, QByteArray *destination) const
1630 {
1631  try {
1632  Private::CacheLocker lock(d);
1633  if (lock.failed()) {
1634  return false;
1635  }
1636 
1637  // Search in the index for our data, hashed by key;
1638  QByteArray encodedKey = key.toUtf8();
1639  qint32 entry = d->shm->findNamedEntry(encodedKey);
1640 
1641  if (entry >= 0) {
1642  const IndexTableEntry *header = &d->shm->indexTable()[entry];
1643  const void *resultPage = d->shm->page(header->firstPage);
1644  if (KDE_ISUNLIKELY(!resultPage)) {
1645  throw KSDCCorrupted();
1646  }
1647 
1648  d->verifyProposedMemoryAccess(resultPage, header->totalItemSize);
1649 
1650  header->useCount++;
1651  header->lastUsedTime = ::time(0);
1652 
1653  // Our item is the key followed immediately by the data, so skip
1654  // past the key.
1655  const char *cacheData = reinterpret_cast<const char *>(resultPage);
1656  cacheData += encodedKey.size();
1657  cacheData++; // Skip trailing null -- now we're pointing to start of data
1658 
1659  if (destination) {
1660  *destination = QByteArray(cacheData, header->totalItemSize - encodedKey.size() - 1);
1661  }
1662 
1663  return true;
1664  }
1665  }
1666  catch(KSDCCorrupted) {
1667  d->recoverCorruptedCache();
1668  }
1669 
1670  return false;
1671 }
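// A minimal usage sketch of the public API implemented above; the cache
// name, sizes and key are arbitrary examples, not values taken from this
// file:
//
//   // 5 MiB cache whose items are expected to average roughly 4 KiB each.
//   KSharedDataCache cache(QLatin1String("example-cache"),
//                          5 * 1024 * 1024, 4096);
//
//   cache.insert(QLatin1String("some-key"), QByteArray("some data"));
//
//   QByteArray value;
//   if (cache.find(QLatin1String("some-key"), &value)) {
//       // value now holds "some data"; other processes opening a
//       // KSharedDataCache with the same name see the same entry.
//   }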
1672 
1673 void KSharedDataCache::clear()
1674 {
1675  try {
1676  Private::CacheLocker lock(d);
1677 
1678  if(!lock.failed()) {
1679  d->shm->clear();
1680  }
1681  }
1682  catch(KSDCCorrupted) {
1683  d->recoverCorruptedCache();
1684  }
1685 }
1686 
1687 bool KSharedDataCache::contains(const QString &key) const
1688 {
1689  try {
1690  Private::CacheLocker lock(d);
1691  if (lock.failed()) {
1692  return false;
1693  }
1694 
1695  return d->shm->findNamedEntry(key.toUtf8()) >= 0;
1696  }
1697  catch(KSDCCorrupted) {
1698  d->recoverCorruptedCache();
1699  return false;
1700  }
1701 }
1702 
1703 void KSharedDataCache::deleteCache(const QString &cacheName)
1704 {
1705  QString cachePath = KGlobal::dirs()->locateLocal("cache", cacheName + QLatin1String(".kcache"));
1706 
1707  // Note that it is important to simply unlink the file, and not truncate it
1708  // smaller first to avoid SIGBUS errors and similar with shared memory
1709  // attached to the underlying inode.
1710  kDebug(ksdcArea()) << "Removing cache at" << cachePath;
1711  QFile::remove(cachePath);
1712 }
1713 
1714 unsigned KSharedDataCache::totalSize() const
1715 {
1716  try {
1717  Private::CacheLocker lock(d);
1718  if (lock.failed()) {
1719  return 0u;
1720  }
1721 
1722  return d->shm->cacheSize;
1723  }
1724  catch(KSDCCorrupted) {
1725  d->recoverCorruptedCache();
1726  return 0u;
1727  }
1728 }
1729 
1730 unsigned KSharedDataCache::freeSize() const
1731 {
1732  try {
1733  Private::CacheLocker lock(d);
1734  if (lock.failed()) {
1735  return 0u;
1736  }
1737 
1738  return d->shm->cacheAvail * d->shm->cachePageSize();
1739  }
1740  catch(KSDCCorrupted) {
1741  d->recoverCorruptedCache();
1742  return 0u;
1743  }
1744 }
1745 
1746 KSharedDataCache::EvictionPolicy KSharedDataCache::evictionPolicy() const
1747 {
1748  if (d && d->shm) {
1749  return static_cast<EvictionPolicy>(d->shm->evictionPolicy.fetchAndAddAcquire(0));
1750  }
1751 
1752  return NoEvictionPreference;
1753 }
1754 
1755 void KSharedDataCache::setEvictionPolicy(EvictionPolicy newPolicy)
1756 {
1757  if (d && d->shm) {
1758  d->shm->evictionPolicy.fetchAndStoreRelease(static_cast<int>(newPolicy));
1759  }
1760 }
1761 
1762 unsigned KSharedDataCache::timestamp() const
1763 {
1764  if (d && d->shm) {
1765  return static_cast<unsigned>(d->shm->cacheTimestamp.fetchAndAddAcquire(0));
1766  }
1767 
1768  return 0;
1769 }
1770 
1771 void KSharedDataCache::setTimestamp(unsigned newTimestamp)
1772 {
1773  if (d && d->shm) {
1774  d->shm->cacheTimestamp.fetchAndStoreRelease(static_cast<int>(newTimestamp));
1775  }
1776 }