objc-cache-old.mm
/*
 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-cache.m
* Method cache management
* Cache flushing
* Cache garbage collection
* Cache instrumentation
* Dedicated allocator for large caches
**********************************************************************/

/***********************************************************************
* Method cache locking (GrP 2001-1-14)
*
* For speed, objc_msgSend does not acquire any locks when it reads
* method caches. Instead, all cache changes are performed so that any
* objc_msgSend running concurrently with the cache mutator will not
* crash or hang or get an incorrect result from the cache.
*
* When cache memory becomes unused (e.g. the old cache after cache
* expansion), it is not immediately freed, because a concurrent
* objc_msgSend could still be using it. Instead, the memory is
* disconnected from the data structures and placed on a garbage list.
* The memory is now only accessible to instances of objc_msgSend that
* were running when the memory was disconnected; any further calls to
* objc_msgSend will not see the garbage memory because the other data
* structures don't point to it anymore. The collecting_in_critical
* function checks the PC of all threads and returns FALSE when all threads
* are found to be outside objc_msgSend. This means any call to objc_msgSend
* that could have had access to the garbage has finished or moved past the
* cache lookup stage, so it is safe to free the memory.
*
* All functions that modify cache data or structures must acquire the
* cacheUpdateLock to prevent interference from concurrent modifications.
* The function that frees cache garbage must acquire the cacheUpdateLock
* and use collecting_in_critical() to flush out cache readers.
* The cacheUpdateLock is also used to protect the custom allocator used
* for large method cache blocks.
*
* Cache readers (PC-checked by collecting_in_critical())
* objc_msgSend*
* _cache_getImp
* _cache_getMethod
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
* _cache_fill         (acquires lock)
* _cache_expand       (only called from cache_fill)
* _cache_create       (only called from cache_expand)
* bcopy               (only called from instrumented cache_expand)
* flush_caches        (acquires lock)
* _cache_flush        (only called from cache_fill and flush_caches)
* _cache_collect_free (only called from cache_expand and cache_flush)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* _cache_print
* _class_printMethodCaches
* _class_printDuplicateCacheEntries
* _class_printMethodCacheStatistics
*
* _class_lookupMethodAndLoadCache is a special case. It may read a
* method triplet out of one cache and store it in another cache. This
* is unsafe if the method triplet is a forward:: entry, because the
* triplet itself could be freed unless _class_lookupMethodAndLoadCache
* were PC-checked or used a lock. Additionally, storing the method
* triplet in both caches would result in double-freeing if both caches
* were flushed or expanded. The solution is for _cache_getMethod to
* ignore all entries whose implementation is _objc_msgForward_impcache,
* so _class_lookupMethodAndLoadCache cannot look at a forward:: entry
* unsafely or place it in multiple caches.
***********************************************************************/
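
/* Illustrative sketch (not part of the original source): the writer-side
 * sequence described above, as _cache_expand performs it further below.
 * `new_cache` and `byte_size` are placeholders here.
 *
 *     mutex_locker_t lock(cacheUpdateLock);       // writers serialize
 *     Cache old_cache = cls->cache;
 *     cls->cache = new_cache;                     // disconnect old block
 *     _cache_collect_free(old_cache, byte_size);  // queue it as garbage
 *     _cache_collect(false);                      // free only when no
 *                                                 // reader PC is critical
 */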

#if !__OBJC2__

#include "objc-private.h"
#include "objc-cache-old.h"
#include "hashtable2.h"

typedef struct {
    SEL name;     // same layout as struct old_method
    void *unused;
    IMP imp;      // same layout as struct old_method
} cache_entry;

/* When _class_slow_grow is non-zero, any given cache is actually grown
 * only on the odd-numbered times it becomes full; on the even-numbered
 * times, it is simply emptied and re-used. When this flag is zero,
 * caches are grown every time. */
static const int _class_slow_grow = 1;

/* For min cache size: clear_cache=1, slow_grow=1
   For max cache size: clear_cache=0, slow_grow=0 */

/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
    INIT_CACHE_SIZE_LOG2 = 2,
    INIT_CACHE_SIZE      = (1 << INIT_CACHE_SIZE_LOG2)
};

/* Amount of space required for `count` hash table buckets, knowing that
 * one entry is embedded in the cache structure itself. */
#define TABLE_SIZE(count)  ((count - 1) * sizeof(cache_entry *))

#if !TARGET_OS_WIN32
#   define CACHE_ALLOCATOR
#endif

/* Custom cache allocator parameters.
 * CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM. */
#define CACHE_ALLOCATOR_MIN 512
#define CACHE_QUANTUM (CACHE_ALLOCATOR_MIN+sizeof(struct objc_cache)-sizeof(cache_entry*))
#define CACHE_REGION_SIZE ((128*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
// #define CACHE_REGION_SIZE ((256*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
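
/* Worked example (illustrative, not in the original source): on LP64,
 * assuming sizeof(struct objc_cache) == 16 and sizeof(cache_entry *) == 8,
 *
 *     CACHE_QUANTUM     = 512 + 16 - 8         = 520 bytes
 *     CACHE_REGION_SIZE = (131072 / 520) * 520 = 131040 bytes
 *
 * which matches the "quantum of 520" described in the allocator
 * commentary further below. */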

static uintptr_t cache_allocator_mask_for_size(size_t size)
{
    return (size - sizeof(struct objc_cache)) / sizeof(cache_entry *);
}

static size_t cache_allocator_size_for_mask(uintptr_t mask)
{
    size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
    size_t actual = CACHE_QUANTUM;
    while (actual < requested) actual += CACHE_QUANTUM;
    return actual;
}
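
/* Example round trip (illustrative, LP64 sizes assumed): a 128-slot cache
 * has mask 127. cache_allocator_size_for_mask(127) requests
 * 16 + 127*8 = 1032 bytes and rounds up to two quanta, 1040 bytes.
 * Conversely, cache_allocator_mask_for_size(1040) = (1040-16)/8 = 128,
 * so the allocated block can hold a slightly larger table than asked for. */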

/* Cache instrumentation data. Immediately follows the cache block itself. */
#ifdef OBJC_INSTRUMENTED
typedef struct
{
    unsigned int hitCount;          // cache lookup success tally
    unsigned int hitProbes;         // sum entries checked to hit
    unsigned int maxHitProbes;      // max entries checked to hit
    unsigned int missCount;         // cache lookup no-find tally
    unsigned int missProbes;        // sum entries checked to miss
    unsigned int maxMissProbes;     // max entries checked to miss
    unsigned int flushCount;        // cache flush tally
    unsigned int flushedEntries;    // sum cache entries flushed
    unsigned int maxFlushedEntries; // max cache entries flushed
} CacheInstrumentation;

#define CACHE_INSTRUMENTATION(cache)  (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
#endif

/* Cache filling and flushing instrumentation */
static int totalCacheFills = 0;
#ifdef OBJC_INSTRUMENTED
unsigned int LinearFlushCachesCount = 0;
unsigned int LinearFlushCachesVisitedCount = 0;
unsigned int MaxLinearFlushCachesVisitedCount = 0;
unsigned int NonlinearFlushCachesCount = 0;
unsigned int NonlinearFlushCachesClassCount = 0;
unsigned int NonlinearFlushCachesVisitedCount = 0;
unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
unsigned int IdealFlushCachesCount = 0;
unsigned int MaxIdealFlushCachesCount = 0;
#endif

/***********************************************************************
* A static empty cache. All classes initially point at this cache.
* When the first message is sent it misses in the cache, and when
* the cache is grown it checks for this case and uses malloc rather
* than realloc. This avoids the need to check for NULL caches in the
* messenger.
***********************************************************************/

struct objc_cache _objc_empty_cache =
{
    0,        // mask
    0,        // occupied
    { NULL }  // buckets
};
#ifdef OBJC_INSTRUMENTED
CacheInstrumentation emptyCacheInstrumentation = {0};
#endif

/* Local prototypes */
static bool _cache_isEmpty(Cache cache);
static Cache _cache_malloc(uintptr_t slotCount);
static Cache _cache_create(Class cls);
static Cache _cache_expand(Class cls);

static int _collecting_in_critical(void);
static void _garbage_make_room(void);
static void _cache_collect_free(void *data, size_t size);

#if defined(CACHE_ALLOCATOR)
static bool cache_allocator_is_block(void *block);
static Cache cache_allocator_calloc(size_t size);
static void cache_allocator_free(void *block);
#endif

/***********************************************************************
* Cache statistics for OBJC_PRINT_CACHE_SETUP
**********************************************************************/
static unsigned int cache_counts[16];
static size_t cache_allocations;
static size_t cache_collections;
static size_t cache_allocator_regions;

static size_t log2u(size_t x)
{
    unsigned int log;

    log = 0;
    while (x >>= 1)
        log += 1;

    return log;
}

/***********************************************************************
* _cache_isEmpty.
* Returns YES if the given cache is some empty cache.
* Empty caches should never be allocated on the heap.
**********************************************************************/
static bool _cache_isEmpty(Cache cache)
{
    return (cache == NULL  ||  cache == (Cache)&_objc_empty_cache  ||  cache->mask == 0);
}

/***********************************************************************
* _cache_malloc.
*
* Called from _cache_create() and cache_expand()
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static Cache _cache_malloc(uintptr_t slotCount)
{
    Cache new_cache;
    size_t size;

    cacheUpdateLock.assertLocked();

    // Allocate table (why not check for failure?)
    size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
#if defined(OBJC_INSTRUMENTED)
    // Custom cache allocator can't handle instrumentation.
    size += sizeof(CacheInstrumentation);
    new_cache = (Cache)calloc(size, 1);  // cast required: this is C++
    new_cache->mask = slotCount - 1;
#elif !defined(CACHE_ALLOCATOR)
    // fixme cache allocator implementation isn't 64-bit clean
    new_cache = (Cache)calloc(size, 1);  // cast required: this is C++
    new_cache->mask = (unsigned int)(slotCount - 1);
#else
    if (size < CACHE_ALLOCATOR_MIN) {
        new_cache = (Cache)calloc(size, 1);
        new_cache->mask = slotCount - 1;
        // occupied and buckets and instrumentation are all zero
    } else {
        new_cache = cache_allocator_calloc(size);
        // mask is already set
        // occupied and buckets and instrumentation are all zero
    }
#endif

    if (PrintCaches) {
        size_t bucket = log2u(slotCount);
        if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
            cache_counts[bucket]++;
        }
        cache_allocations++;
    }

    return new_cache;
}

/***********************************************************************
* _cache_free_block.
*
* Called from _cache_free() and _cache_collect_free().
* block may be a cache or a forward:: entry.
* If block is a cache, forward:: entries it points to will NOT be freed.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static inline int isPowerOf2(unsigned long l) { return 1 == __builtin_popcountl(l); }

static void _cache_free_block(void *block)
{
    cacheUpdateLock.assertLocked();

#if !TARGET_OS_WIN32
    if (PrintCaches) {
        Cache cache = (Cache)block;
        size_t slotCount = cache->mask + 1;
        if (isPowerOf2(slotCount)) {
            size_t bucket = log2u(slotCount);
            if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
                cache_counts[bucket]--;
            }
        }
    }
#endif

#if defined(CACHE_ALLOCATOR)
    if (cache_allocator_is_block(block)) {
        cache_allocator_free(block);
    } else
#endif
    {
        free(block);
    }
}

/***********************************************************************
* _cache_free.
*
* Called from _objc_remove_classes_in_image().
* forward:: entries in the cache ARE freed.
* Cache locks: cacheUpdateLock must NOT be held by the caller.
**********************************************************************/
void _cache_free(Cache cache)
{
    unsigned int i;

    mutex_locker_t lock(cacheUpdateLock);

    for (i = 0; i < cache->mask + 1; i++) {
        cache_entry *entry = (cache_entry *)cache->buckets[i];
        if (entry  &&  entry->imp == _objc_msgForward_impcache) {
            _cache_free_block(entry);
        }
    }

    _cache_free_block(cache);
}

/***********************************************************************
* _cache_create.
*
* Called from _cache_expand().
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static Cache _cache_create(Class cls)
{
    Cache new_cache;

    cacheUpdateLock.assertLocked();

    // Allocate new cache block
    new_cache = _cache_malloc(INIT_CACHE_SIZE);

    // Install the cache
    cls->cache = new_cache;

    // Clear the grow flag so that we will re-use the current storage,
    // rather than actually grow the cache, when expanding the cache
    // for the first time
    if (_class_slow_grow) {
        cls->setShouldGrowCache(false);
    }

    // Return our creation
    return new_cache;
}

/***********************************************************************
* _cache_expand.
*
* Called from _cache_fill()
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static Cache _cache_expand(Class cls)
{
    Cache old_cache;
    Cache new_cache;
    uintptr_t slotCount;
    uintptr_t index;

    cacheUpdateLock.assertLocked();

    // First growth goes from empty cache to a real one
    old_cache = cls->cache;
    if (_cache_isEmpty(old_cache))
        return _cache_create (cls);

    if (_class_slow_grow) {
        // Cache grows every other time only.
        if (cls->shouldGrowCache()) {
            // Grow the cache this time. Don't grow next time.
            cls->setShouldGrowCache(false);
        }
        else {
            // Reuse the current cache storage this time. Do grow next time.
            cls->setShouldGrowCache(true);

            // Clear the valid-entry counter
            old_cache->occupied = 0;

            // Invalidate all the cache entries
            for (index = 0; index < old_cache->mask + 1; index += 1)
            {
                // Remember what this entry was, so we can possibly
                // deallocate it after the bucket has been invalidated
                cache_entry *oldEntry = (cache_entry *)old_cache->buckets[index];

                // Skip invalid entry
                if (!oldEntry)
                    continue;

                // Invalidate this entry
                old_cache->buckets[index] = NULL;

                // Deallocate "forward::" entry
                if (oldEntry->imp == _objc_msgForward_impcache) {
                    _cache_collect_free (oldEntry, sizeof(cache_entry));
                }
            }

            // Return the same old cache, freshly emptied
            return old_cache;
        }
    }

    // Double the cache size
    slotCount = (old_cache->mask + 1) << 1;

    new_cache = _cache_malloc(slotCount);

#ifdef OBJC_INSTRUMENTED
    // Propagate the instrumentation data
    {
        CacheInstrumentation *oldCacheData;
        CacheInstrumentation *newCacheData;

        oldCacheData = CACHE_INSTRUMENTATION(old_cache);
        newCacheData = CACHE_INSTRUMENTATION(new_cache);
        bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
    }
#endif

    // Deallocate "forward::" entries from the old cache
    for (index = 0; index < old_cache->mask + 1; index++) {
        cache_entry *entry = (cache_entry *)old_cache->buckets[index];
        if (entry  &&  entry->imp == _objc_msgForward_impcache) {
            _cache_collect_free (entry, sizeof(cache_entry));
        }
    }

    // Install new cache
    cls->cache = new_cache;

    // Deallocate old cache, try freeing all the garbage
    _cache_collect_free (old_cache, old_cache->mask * sizeof(cache_entry *));
    _cache_collect(false);

    return new_cache;
}

/***********************************************************************
* _cache_fill. Add the specified method to the specified class' cache.
* Returns NO if the cache entry wasn't added: cache was busy,
* class is still being initialized, new entry is a duplicate.
*
* Called only from _class_lookupMethodAndLoadCache and
* class_respondsToMethod and _cache_addForwardEntry.
*
* Cache locks: cacheUpdateLock must not be held.
**********************************************************************/
bool _cache_fill(Class cls, Method smt, SEL sel)
{
    uintptr_t newOccupied;
    uintptr_t index;
    cache_entry **buckets;
    cache_entry *entry;
    Cache cache;

    cacheUpdateLock.assertUnlocked();

    // Never cache before +initialize is done
    if (!cls->isInitialized()) {
        return NO;
    }

    // Keep tally of cache additions
    totalCacheFills += 1;

    mutex_locker_t lock(cacheUpdateLock);

    entry = (cache_entry *)smt;

    cache = cls->cache;

    // Make sure the entry wasn't added to the cache by some other thread
    // before we grabbed the cacheUpdateLock.
    // Don't use _cache_getMethod() because _cache_getMethod() doesn't
    // return forward:: entries.
    if (_cache_getImp(cls, sel)) {
        return NO; // entry is already cached, didn't add new one
    }

    // Use the cache as-is if it is less than 3/4 full
    newOccupied = cache->occupied + 1;
    if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
        // Cache is less than 3/4 full.
        cache->occupied = (unsigned int)newOccupied;
    } else {
        // Cache is too full. Expand it.
        cache = _cache_expand (cls);

        // Account for the addition
        cache->occupied += 1;
    }

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    buckets = (cache_entry **)cache->buckets;
    for (index = CACHE_HASH(sel, cache->mask);
         buckets[index] != NULL;
         index = (index+1) & cache->mask)
    {
        // empty
    }
    buckets[index] = entry;

    return YES; // successfully added new cache entry
}
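
/* Probe-sequence example (illustrative; assumes CACHE_HASH from
 * objc-cache-old.h is ((uintptr_t)sel >> 2) & mask). With mask == 3 and
 * a selector that hashes to slot 2, the loop above visits slots
 * 2, 3, 0, 1 in order and stores the entry in the first NULL bucket.
 * Because _cache_fill caps occupancy at 3/4, at least one of the four
 * slots is always NULL, so the probe loop terminates. */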

/***********************************************************************
* _cache_addForwardEntry
* Add a forward:: entry for the given selector to cls's method cache.
* Does nothing if the cache addition fails for any reason.
* Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
* Cache locks: cacheUpdateLock must not be held.
**********************************************************************/
void _cache_addForwardEntry(Class cls, SEL sel)
{
    cache_entry *smt;

    smt = (cache_entry *)malloc(sizeof(cache_entry));
    smt->name = sel;
    smt->imp = _objc_msgForward_impcache;
    if (! _cache_fill(cls, (Method)smt, sel)) { // fixme hack
        // Entry not added to cache. Don't leak the method struct.
        free(smt);
    }
}
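
/* Effect (illustrative note, not in the original source): once a
 * forward:: entry is cached, a repeated message with an unimplemented
 * selector hits the cache and jumps straight to
 * _objc_msgForward_impcache, skipping the full method search. Per the
 * locking commentary at the top of this file, _cache_getMethod
 * deliberately hides these entries so they are never copied into a
 * second cache and double-freed. */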

/***********************************************************************
* _cache_flush. Invalidate all valid entries in the given class' cache.
*
* Called from flush_caches() and _cache_fill()
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void _cache_flush(Class cls)
{
    Cache cache;
    unsigned int index;

    cacheUpdateLock.assertLocked();

    // Locate cache. Ignore unused cache.
    cache = cls->cache;
    if (_cache_isEmpty(cache)) return;

#ifdef OBJC_INSTRUMENTED
    {
        CacheInstrumentation *cacheData;

        // Tally this flush
        cacheData = CACHE_INSTRUMENTATION(cache);
        cacheData->flushCount += 1;
        cacheData->flushedEntries += cache->occupied;
        if (cache->occupied > cacheData->maxFlushedEntries)
            cacheData->maxFlushedEntries = cache->occupied;
    }
#endif

    // Traverse the cache
    for (index = 0; index <= cache->mask; index += 1)
    {
        // Remember what this entry was, so we can possibly
        // deallocate it after the bucket has been invalidated
        cache_entry *oldEntry = (cache_entry *)cache->buckets[index];

        // Invalidate this entry
        cache->buckets[index] = NULL;

        // Deallocate "forward::" entry
        if (oldEntry  &&  oldEntry->imp == _objc_msgForward_impcache)
            _cache_collect_free (oldEntry, sizeof(cache_entry));
    }

    // Clear the valid-entry counter
    cache->occupied = 0;
}

/***********************************************************************
* flush_cache. Flushes the instance method cache for class cls only.
* Use flush_caches() if cls might have in-use subclasses.
**********************************************************************/
void flush_cache(Class cls)
{
    if (cls) {
        mutex_locker_t lock(cacheUpdateLock);
        _cache_flush(cls);
    }
}

/***********************************************************************
* cache collection.
**********************************************************************/

#if !TARGET_OS_WIN32

// A sentinel (magic value) to report bad thread_get_state status.
// Must not be a valid PC.
// Must not be zero - thread_get_state() on a new thread returns PC == 0.
#define PC_SENTINEL  1

// UNIX03 compliance hack (4508809)
#if !__DARWIN_UNIX03
#define __srr0 srr0
#define __eip eip
#endif

static uintptr_t _get_pc_for_thread(thread_t thread)
#if defined(__i386__)
{
    i386_thread_state_t state;
    unsigned int count = i386_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
}
#elif defined(__x86_64__)
{
    x86_thread_state64_t state;
    unsigned int count = x86_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
}
#elif defined(__arm__)
{
    arm_thread_state_t state;
    unsigned int count = ARM_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
}
#else
{
#error _get_pc_for_thread () not implemented for this architecture
}
#endif

#endif

/***********************************************************************
* _collecting_in_critical.
* Returns TRUE if some thread is currently executing a cache-reading
* function. Collection of cache garbage is not allowed when a cache-
* reading function is in progress because it might still be using
* the garbage memory.
**********************************************************************/
typedef struct {
    uint64_t       location;
    unsigned short length;
    unsigned short recovery_offs;
    unsigned int   flags;
} task_restartable_range_t;

extern "C" task_restartable_range_t objc_restartableRanges[];

static int _collecting_in_critical(void)
{
#if TARGET_OS_WIN32
    return TRUE;
#else
    thread_act_port_array_t threads;
    unsigned number;
    unsigned count;
    kern_return_t ret;
    int result;

    mach_port_t mythread = pthread_mach_thread_np(objc_thread_self());

    // Get a list of all the threads in the current task
    ret = task_threads (mach_task_self (), &threads, &number);
    if (ret != KERN_SUCCESS)
    {
        _objc_fatal("task_threads failed (result %d)\n", ret);
    }

    // Check whether any thread is in the cache lookup code
    result = FALSE;
    for (count = 0; count < number; count++)
    {
        int region;
        uintptr_t pc;

        // Don't bother checking ourselves
        if (threads[count] == mythread)
            continue;

        // Find out where thread is executing
        pc = _get_pc_for_thread (threads[count]);

        // Check for bad status, and if so, assume the worst (can't collect)
        if (pc == PC_SENTINEL)
        {
            result = TRUE;
            goto done;
        }

        // Check whether it is in the cache lookup code
        for (region = 0; objc_restartableRanges[region].location != 0; region++)
        {
            // 64-bit locations must not be truncated to 32 bits here
            uint64_t loc = objc_restartableRanges[region].location;
            if ((pc > loc) &&
                (pc - loc) < objc_restartableRanges[region].length)
            {
                result = TRUE;
                goto done;
            }
        }
    }

 done:
    // Deallocate the port rights for the threads
    for (count = 0; count < number; count++) {
        mach_port_deallocate(mach_task_self (), threads[count]);
    }

    // Deallocate the thread list
    vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);

    // Return our finding
    return result;
#endif
}
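
/* Range-check example (illustrative): if a cache-reading routine
 * occupies addresses [loc, loc+length), a thread stopped at pc is
 * "in critical" when pc > loc and pc - loc < length. A failed
 * thread-state read reports PC_SENTINEL, which is treated the same
 * way, so collection always errs on the side of not freeing. */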

/***********************************************************************
* _garbage_make_room. Ensure that there is enough room for at least
* one more ref in the garbage.
**********************************************************************/

// amount of memory represented by all refs in the garbage
static size_t garbage_byte_size = 0;

// do not empty the garbage until garbage_byte_size gets at least this big
static size_t garbage_threshold = 1024;

// table of refs to free
static void **garbage_refs = 0;

// current number of refs in garbage_refs
static size_t garbage_count = 0;

// capacity of current garbage_refs
static size_t garbage_max = 0;

// capacity of initial garbage_refs
enum {
    INIT_GARBAGE_COUNT = 128
};

static void _garbage_make_room(void)
{
    static int first = 1;

    // Create the collection table the first time it is needed
    if (first)
    {
        first = 0;
        garbage_refs = (void**)
            malloc(INIT_GARBAGE_COUNT * sizeof(void *));
        garbage_max = INIT_GARBAGE_COUNT;
    }

    // Double the table if it is full
    else if (garbage_count == garbage_max)
    {
        garbage_refs = (void**)
            realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
        garbage_max *= 2;
    }
}

/***********************************************************************
* _cache_collect_free. Add the specified malloc'd memory to the list
* of them to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void _cache_collect_free(void *data, size_t size)
{
    cacheUpdateLock.assertLocked();

    _garbage_make_room ();
    garbage_byte_size += size;
    garbage_refs[garbage_count++] = data;
}

/***********************************************************************
* _cache_collect. Try to free accumulated dead caches.
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void _cache_collect(bool collectALot)
{
    cacheUpdateLock.assertLocked();

    // Done if the garbage is not full
    if (garbage_byte_size < garbage_threshold  &&  !collectALot) {
        return;
    }

    // Synchronize collection with objc_msgSend and other cache readers
    if (!collectALot) {
        if (_collecting_in_critical ()) {
            // objc_msgSend (or other cache reader) is currently looking in
            // the cache and might still be using some garbage.
            if (PrintCaches) {
                _objc_inform ("CACHES: not collecting; "
                              "objc_msgSend in progress");
            }
            return;
        }
    }
    else {
        // No excuses.
        while (_collecting_in_critical())
            ;
    }

    // No cache readers in progress - garbage is now deletable

    // Log our progress
    if (PrintCaches) {
        cache_collections++;
        _objc_inform ("CACHES: COLLECTING %zu bytes "
                      "(%zu regions, %zu allocations, %zu collections)",
                      garbage_byte_size, cache_allocator_regions,
                      cache_allocations, cache_collections);
    }

    // Dispose all refs now in the garbage
    while (garbage_count--) {
        _cache_free_block(garbage_refs[garbage_count]);
    }

    // Clear the garbage count and total size indicator
    garbage_count = 0;
    garbage_byte_size = 0;

    if (PrintCaches) {
        size_t i;
        size_t total = 0;
        size_t ideal_total = 0;
        size_t malloc_total = 0;
        size_t local_total = 0;

        for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
            int count = cache_counts[i];
            int slots = 1 << i;
            size_t size = sizeof(struct objc_cache) + TABLE_SIZE(slots);
            size_t ideal = size;
#if TARGET_OS_WIN32
            size_t malloc = size;
#else
            size_t malloc = malloc_good_size(size);
#endif
            size_t local = size < CACHE_ALLOCATOR_MIN ? malloc : cache_allocator_size_for_mask(cache_allocator_mask_for_size(size));

            if (!count) continue;

            _objc_inform("CACHES: %4d slots: %4d caches, "
                         "%6zu / %6zu / %6zu bytes ideal/malloc/local, "
                         "%6zu / %6zu bytes wasted malloc/local",
                         slots, count, ideal*count, malloc*count, local*count,
                         malloc*count-ideal*count, local*count-ideal*count);
            total += count;
            ideal_total += ideal*count;
            malloc_total += malloc*count;
            local_total += local*count;
        }

        _objc_inform("CACHES: total: %4zu caches, "
                     "%6zu / %6zu / %6zu bytes ideal/malloc/local, "
                     "%6zu / %6zu bytes wasted malloc/local",
                     total, ideal_total, malloc_total, local_total,
                     malloc_total-ideal_total, local_total-ideal_total);
    }
}

#if defined(CACHE_ALLOCATOR)

/***********************************************************************
* Custom method cache allocator.
* Method cache block sizes are 2^slots+2 words, which is a pessimal
* case for the system allocator. It wastes 504 bytes per cache block
* with 128 or more slots, which adds up to tens of KB for an AppKit process.
* To save memory, the custom cache allocator below is used.
*
* The cache allocator uses 128 KB allocation regions. Few processes will
* require a second region. Within a region, allocation is address-ordered
* first fit.
*
* The cache allocator uses a quantum of 520.
* Cache block ideal sizes: 520, 1032, 2056, 4104
* Cache allocator sizes:   520, 1040, 2080, 4160
*
* Because all blocks are known to be genuine method caches, the ordinary
* cache->mask and cache->occupied fields are used as block headers.
* No out-of-band headers are maintained. The number of blocks will
* almost always be fewer than 200, so for simplicity there is no free
* list or other optimization.
*
* Block in use:  mask != 0, occupied != -1 (mask indicates block size)
* Block free:    mask != 0, occupied == -1 (mask is precisely block size)
*
* No cache allocator functions take any locks. Instead, the caller
* must hold the cacheUpdateLock.
*
* fixme with 128 KB regions and 520 B min block size, an allocation
* bitmap would be only 32 bytes - better than free list?
**********************************************************************/
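
/* Header encoding example (illustrative, not in the original source):
 * a freshly carved free block of one quantum reuses the Cache header
 * fields like this:
 *
 *     cache_allocator_block *b = ...;
 *     b->size  = 520;           // overlays cache->mask: exact block size
 *     b->state = (uintptr_t)-1; // overlays cache->occupied: block is free
 *
 * When the block is handed out, cache_region_calloc rewrites the same
 * words as a live cache header: size becomes the bucket mask and state
 * becomes the occupied count (0). */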

typedef struct cache_allocator_block {
    uintptr_t size;
    uintptr_t state;
    struct cache_allocator_block *nextFree;
} cache_allocator_block;

typedef struct cache_allocator_region {
    cache_allocator_block *start;
    cache_allocator_block *end;    // first non-block address
    cache_allocator_block *freeList;
    struct cache_allocator_region *next;
} cache_allocator_region;

static cache_allocator_region *cacheRegion = NULL;

/***********************************************************************
* cache_allocator_add_region
* Allocates and returns a new region that can hold at least size
* bytes of large method caches.
* The actual size will be rounded up to a CACHE_QUANTUM boundary,
* with a minimum of CACHE_REGION_SIZE.
* The new region is lowest-priority for new allocations. Callers that
* know the other regions are already full should allocate directly
* into the returned region.
**********************************************************************/
static cache_allocator_region *cache_allocator_add_region(size_t size)
{
    vm_address_t addr;
    cache_allocator_block *b;
    cache_allocator_region **rgnP;
    cache_allocator_region *newRegion = (cache_allocator_region *)
        calloc(1, sizeof(cache_allocator_region));

    // Round size up to quantum boundary, and apply the minimum size.
    size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
    if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;

    // Allocate the region
    addr = (vm_address_t)calloc(size, 1);
    newRegion->start = (cache_allocator_block *)addr;
    newRegion->end = (cache_allocator_block *)(addr + size);

    // Mark the first block: free and covers the entire region
    b = newRegion->start;
    b->size = size;
    b->state = (uintptr_t)-1;
    b->nextFree = NULL;
    newRegion->freeList = b;

    // Add to end of the linked list of regions.
    // Other regions should be re-used before this one is touched.
    newRegion->next = NULL;
    rgnP = &cacheRegion;
    while (*rgnP) {
        rgnP = &(**rgnP).next;
    }
    *rgnP = newRegion;

    cache_allocator_regions++;

    return newRegion;
}

/***********************************************************************
* cache_allocator_coalesce
* Attempts to coalesce a free block with the single free block following
* it in the free list, if any.
**********************************************************************/
static void cache_allocator_coalesce(cache_allocator_block *block)
{
    if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
        block->size += block->nextFree->size;
        block->nextFree = block->nextFree->nextFree;
    }
}
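
/* Coalescing example (illustrative): if a free block at address A has
 * size 520 and the next free block sits exactly at A + 520, the two are
 * physically adjacent, so the test above fires and they merge into one
 * 1040-byte free block, with the free list skipping the swallowed block.
 * If an in-use block lies between them, the addresses don't line up and
 * no merge happens. */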

/***********************************************************************
* cache_region_calloc
* Attempt to allocate a size-byte block in the given region.
* Allocation is first-fit. The free list is already fully coalesced.
* Returns NULL if there is not enough room in the region for the block.
**********************************************************************/
static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
{
    cache_allocator_block **blockP;
    uintptr_t mask;

    // Save mask for allocated block, then round size
    // up to CACHE_QUANTUM boundary
    mask = cache_allocator_mask_for_size(size);
    size = cache_allocator_size_for_mask(mask);

    // Search the free list for a sufficiently large free block.
    for (blockP = &rgn->freeList;
         *blockP != NULL;
         blockP = &(**blockP).nextFree)
    {
        cache_allocator_block *block = *blockP;
        if (block->size < size) continue; // not big enough

        // block is now big enough. Allocate from it.

        // Slice off unneeded fragment of block, if any,
        // and reconnect the free list around block.
        if (block->size - size >= CACHE_QUANTUM) {
            cache_allocator_block *leftover =
                (cache_allocator_block *)(size + (uintptr_t)block);
            leftover->size = block->size - size;
            leftover->state = (uintptr_t)-1;
            leftover->nextFree = block->nextFree;
            *blockP = leftover;
        } else {
            *blockP = block->nextFree;
        }

        // block is now exactly the right size.
        bzero(block, size);
        block->size = mask; // Cache->mask
        block->state = 0;   // Cache->occupied

        return block;
    }

    // No room in this region.
    return NULL;
}
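
/* First-fit example (illustrative, LP64 sizes assumed): a 128-slot cache
 * requests 16 + 127*8 = 1032 bytes; mask_for_size yields mask 127 and
 * size_for_mask rounds the block up to 1040 bytes. In a fresh region the
 * single 131040-byte free block is big enough, so a 1040-byte block is
 * carved off its front (leftover: 130000 bytes, still free) and returned
 * zeroed with mask = 127 and occupied = 0. */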

/***********************************************************************
* cache_allocator_calloc
* Custom allocator for large method caches (128+ slots)
* The returned cache block already has cache->mask set.
* cache->occupied and the cache contents are zero.
* Cache locks: cacheUpdateLock must be held by the caller
**********************************************************************/
static Cache cache_allocator_calloc(size_t size)
{
    cache_allocator_region *rgn;

    cacheUpdateLock.assertLocked();

    for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
        void *p = cache_region_calloc(rgn, size);
        if (p) {
            return (Cache)p;
        }
    }

    // No regions or all regions full - make a region and try one more time
    // In the unlikely case of a cache over 256KB, it will get its own region.
    return (Cache)cache_region_calloc(cache_allocator_add_region(size), size);
}

/***********************************************************************
* cache_allocator_region_for_block
* Returns the cache allocator region that ptr points into, or NULL.
**********************************************************************/
static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
{
    cache_allocator_region *rgn;
    for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
        if (block >= rgn->start  &&  block < rgn->end) return rgn;
    }
    return NULL;
}

/***********************************************************************
* cache_allocator_is_block
* If ptr is a live block from the cache allocator, return YES
* If ptr is a block from some other allocator, return NO.
* If ptr is a dead block from the cache allocator, result is undefined.
* Cache locks: cacheUpdateLock must be held by the caller
**********************************************************************/
static bool cache_allocator_is_block(void *ptr)
{
    cacheUpdateLock.assertLocked();
    return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
}

/***********************************************************************
* cache_allocator_free
* Frees a block allocated by the cache allocator.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void cache_allocator_free(void *ptr)
{
    cache_allocator_block *dead = (cache_allocator_block *)ptr;
    cache_allocator_block *cur;
    cache_allocator_region *rgn;

    cacheUpdateLock.assertLocked();

    if (! (rgn = cache_allocator_region_for_block(dead))) {
        // free of non-pointer
        _objc_inform("cache_allocator_free of non-pointer %p", dead);
        return;
    }

    dead->size = cache_allocator_size_for_mask(dead->size);
    dead->state = (uintptr_t)-1;

    if (!rgn->freeList  ||  rgn->freeList > dead) {
        // dead block belongs at front of free list
        dead->nextFree = rgn->freeList;
        rgn->freeList = dead;
        cache_allocator_coalesce(dead);
        return;
    }

    // dead block belongs in the middle or end of free list
    for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
        cache_allocator_block *ahead = cur->nextFree;

        if (!ahead  ||  ahead > dead) {
            // cur and ahead straddle dead, OR dead belongs at end of free list
            cur->nextFree = dead;
            dead->nextFree = ahead;

            // coalesce into dead first in case both succeed
            cache_allocator_coalesce(dead);
            cache_allocator_coalesce(cur);
            return;
        }
    }

    // uh-oh
    _objc_inform("cache_allocator_free of non-pointer %p", ptr);
}

// defined(CACHE_ALLOCATOR)
#endif

/***********************************************************************
* Cache instrumentation and debugging
**********************************************************************/

#ifdef OBJC_INSTRUMENTED
enum {
    CACHE_HISTOGRAM_SIZE = 512
};

unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE];
unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE];
#endif

/***********************************************************************
* _cache_print.
**********************************************************************/
static void _cache_print(Cache cache)
{
    uintptr_t index;
    uintptr_t count;

    count = cache->mask + 1;
    for (index = 0; index < count; index += 1) {
        cache_entry *entry = (cache_entry *)cache->buckets[index];
        if (entry) {
            if (entry->imp == _objc_msgForward_impcache)
                printf ("does not recognize: \n");
            printf ("%s\n", sel_getName(entry->name));
        }
    }
}

/***********************************************************************
* _class_printMethodCaches.
**********************************************************************/
void _class_printMethodCaches(Class cls)
{
    if (_cache_isEmpty(cls->cache)) {
        printf("no instance-method cache for class %s\n", cls->nameForLogging());
    } else {
        printf("instance-method cache for class %s:\n", cls->nameForLogging());
        _cache_print(cls->cache);
    }

    if (_cache_isEmpty(cls->ISA()->cache)) {
        printf("no class-method cache for class %s\n", cls->nameForLogging());
    } else {
        printf ("class-method cache for class %s:\n", cls->nameForLogging());
        _cache_print(cls->ISA()->cache);
    }
}

#if 0
#warning fixme

/***********************************************************************
* _class_printDuplicateCacheEntries.
**********************************************************************/
void _class_printDuplicateCacheEntries(bool detail)
{
    NXHashState state;
    Class cls;
    unsigned int duplicates;
    unsigned int index1;
    unsigned int index2;
    unsigned int mask;
    unsigned int count;
    unsigned int isMeta;
    Cache cache;

    printf ("Checking for duplicate cache entries \n");

    // Outermost loop - iterate over all classes
    state = NXInitHashState (class_hash);
    duplicates = 0;
    while (NXNextHashState (class_hash, &state, (void **) &cls))
    {
        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Select cache of interest and make sure it exists
            cache = (isMeta ? cls->ISA : cls)->cache;
            if (_cache_isEmpty(cache))
                continue;

            // Middle loop - check each entry in the given cache
            mask = cache->mask;
            count = mask + 1;
            for (index1 = 0; index1 < count; index1 += 1)
            {
                // Skip invalid entry
                if (!cache->buckets[index1])
                    continue;

                // Inner loop - check that given entry matches no later entry
                for (index2 = index1 + 1; index2 < count; index2 += 1)
                {
                    // Skip invalid entry
                    if (!cache->buckets[index2])
                        continue;

                    // Check for duplication by method name comparison
                    if (strcmp ((char *) cache->buckets[index1]->name,
                                (char *) cache->buckets[index2]->name) == 0)
                    {
                        if (detail)
                            printf ("%s %s\n", cls->nameForLogging(), sel_getName(cache->buckets[index1]->name));
                        duplicates += 1;
                        break;
                    }
                }
            }
        }
    }

    // Log the findings
    printf ("duplicates = %d\n", duplicates);
    printf ("total cache fills = %d\n", totalCacheFills);
}
  1099. /***********************************************************************
  1100. * PrintCacheHeader.
  1101. **********************************************************************/
  1102. static void PrintCacheHeader(void)
  1103. {
  1104. #ifdef OBJC_INSTRUMENTED
  1105. printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n");
  1106. printf ("Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n");
  1107. printf ("----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n");
  1108. #else
  1109. printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n");
  1110. printf ("Size Count Used Used Used Hit Hit Miss Miss\n");
  1111. printf ("----- ----- ----- ----- ---- ---- ---- ---- ----\n");
  1112. #endif
  1113. }
  1114. /***********************************************************************
  1115. * PrintCacheInfo.
  1116. **********************************************************************/
  1117. static void PrintCacheInfo(unsigned int cacheSize,
  1118. unsigned int cacheCount,
  1119. unsigned int slotsUsed,
  1120. float avgUsed, unsigned int maxUsed,
  1121. float avgSHit, unsigned int maxSHit,
  1122. float avgSMiss, unsigned int maxSMiss
  1123. #ifdef OBJC_INSTRUMENTED
  1124. , unsigned int totDHits,
  1125. float avgDHit,
  1126. unsigned int maxDHit,
  1127. unsigned int totDMisses,
  1128. float avgDMiss,
  1129. unsigned int maxDMiss,
  1130. unsigned int totDFlsh,
  1131. float avgDFlsh,
  1132. unsigned int maxDFlsh
  1133. #endif
  1134. )
  1135. {
  1136. #ifdef OBJC_INSTRUMENTED
  1137. printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n",
  1138. #else
  1139. printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n",
  1140. #endif
  1141. cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss
  1142. #ifdef OBJC_INSTRUMENTED
  1143. , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh
  1144. #endif
  1145. );
  1146. }
  1147. #ifdef OBJC_INSTRUMENTED
  1148. /***********************************************************************
  1149. * PrintCacheHistogram. Show the non-zero entries from the specified
  1150. * cache histogram.
  1151. **********************************************************************/
  1152. static void PrintCacheHistogram(char *title,
  1153. unsigned int *firstEntry,
  1154. unsigned int entryCount)
  1155. {
  1156. unsigned int index;
  1157. unsigned int *thisEntry;
  1158. printf ("%s\n", title);
  1159. printf (" Probes Tally\n");
  1160. printf (" ------ -----\n");
  1161. for (index = 0, thisEntry = firstEntry;
  1162. index < entryCount;
  1163. index += 1, thisEntry += 1)
  1164. {
  1165. if (*thisEntry == 0)
  1166. continue;
  1167. printf (" %6d %5d\n", index, *thisEntry);
  1168. }
  1169. }
  1170. #endif

/***********************************************************************
* _class_printMethodCacheStatistics.
**********************************************************************/

#define MAX_LOG2_SIZE   32
#define MAX_CHAIN_SIZE  100

void _class_printMethodCacheStatistics(void)
{
    unsigned int isMeta;
    unsigned int index;
    NXHashState state;
    Class cls;
    unsigned int totalChain;
    unsigned int totalMissChain;
    unsigned int maxChain;
    unsigned int maxMissChain;
    unsigned int classCount;
    unsigned int negativeEntryCount;
    unsigned int cacheExpandCount;
    unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int chainCount[MAX_CHAIN_SIZE] = {0};
    unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};
#ifdef OBJC_INSTRUMENTED
    unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
#endif

    printf ("Printing cache statistics\n");

    // Outermost loop - iterate over all classes
    state = NXInitHashState (class_hash);
    classCount = 0;
    negativeEntryCount = 0;
    cacheExpandCount = 0;
    while (NXNextHashState (class_hash, &state, (void **) &cls))
    {
        // Tally classes
        classCount += 1;

        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            Cache cache;
            unsigned int mask;
            unsigned int log2Size;
            unsigned int entryCount;

            // Select cache of interest
            cache = (isMeta ? cls->ISA() : cls)->cache;
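            // (isMeta == 0 walks the class's own cache, which holds instance
            // methods; isMeta == 1 walks the metaclass's cache, which holds
            // class methods.)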

            // Ignore empty cache... should we?
            if (_cache_isEmpty(cache))
                continue;

            // Middle loop - do each entry in the given cache
            mask = cache->mask;
            entryCount = 0;
            totalChain = 0;
            totalMissChain = 0;
            maxChain = 0;
            maxMissChain = 0;
            for (index = 0; index < mask + 1; index += 1)
            {
                cache_entry **buckets;
                cache_entry *entry;
                unsigned int hash;
                unsigned int methodChain;
                unsigned int methodMissChain;
                unsigned int index2;

                // If entry is invalid, the only item of
                // interest is that future insert hashes
                // to this entry can use it directly.
                buckets = (cache_entry **)cache->buckets;
                if (!buckets[index])
                {
                    missChainCount[0] += 1;
                    continue;
                }

                entry = buckets[index];

                // Tally valid entries
                entryCount += 1;

                // Tally "forward::" entries
                if (entry->imp == _objc_msgForward_impcache)
                    negativeEntryCount += 1;

                // Calculate search distance (chain length) for this method
                // The chain may wrap around to the beginning of the table.
                hash = CACHE_HASH(entry->name, mask);
                if (index >= hash) methodChain = index - hash;
                else methodChain = (mask+1) + index - hash;
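                // For example, in an 8-slot cache (mask == 7), an entry that
                // hashes to slot 6 but sits in slot 1 wrapped past the end of
                // the table, so its chain length is (7+1) + 1 - 6 == 3.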

                // Tally chains of this length
                if (methodChain < MAX_CHAIN_SIZE)
                    chainCount[methodChain] += 1;

                // Keep sum of all chain lengths
                totalChain += methodChain;

                // Record greatest chain length
                if (methodChain > maxChain)
                    maxChain = methodChain;

                // Calculate search distance for miss that hashes here
                index2 = index;
                while (buckets[index2])
                {
                    index2 += 1;
                    index2 &= mask;
                }
                methodMissChain = ((index2 - index) & mask);
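                // The "& mask" makes the unsigned subtraction modular. For
                // example, with mask == 7, a probe starting at occupied slot 6
                // that finds its first empty slot at slot 1 gives
                // (1 - 6) & 7 == 3, matching the three steps to slots 7, 0, 1.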

                // Tally miss chains of this length
                if (methodMissChain < MAX_CHAIN_SIZE)
                    missChainCount[methodMissChain] += 1;

                // Keep sum of all miss chain lengths in this class
                totalMissChain += methodMissChain;

                // Record greatest miss chain length
                if (methodMissChain > maxMissChain)
                    maxMissChain = methodMissChain;
            }

            // Factor this cache into statistics about caches of the same
            // type and size (all caches are a power of two in size)
            log2Size = log2u (mask + 1);
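            // mask + 1 is the slot count and is always a power of two, so the
            // logarithm is exact: a 32-slot cache (mask == 31) lands in the
            // log2Size == 5 bucket.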
            cacheCountBySize[isMeta][log2Size] += 1;
            totalEntriesBySize[isMeta][log2Size] += entryCount;
            if (entryCount > maxEntriesBySize[isMeta][log2Size])
                maxEntriesBySize[isMeta][log2Size] = entryCount;
            totalChainBySize[isMeta][log2Size] += totalChain;
            totalMissChainBySize[isMeta][log2Size] += totalMissChain;
            totalMaxChainBySize[isMeta][log2Size] += maxChain;
            totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
            if (maxChain > maxChainBySize[isMeta][log2Size])
                maxChainBySize[isMeta][log2Size] = maxChain;
            if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
                maxMissChainBySize[isMeta][log2Size] = maxMissChain;
#ifdef OBJC_INSTRUMENTED
            {
                CacheInstrumentation *cacheData;

                cacheData = CACHE_INSTRUMENTATION(cache);
                hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
                hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
                if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
                    maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
                missCountBySize[isMeta][log2Size] += cacheData->missCount;
                missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
                if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
                    maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
                flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
                flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
                if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
                    maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
            }
#endif
            // Caches start with a power of two number of entries, and grow by doubling, so
            // we can calculate the number of times this cache has expanded
            cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;
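            // For example, a cache that has doubled from the initial
            // 1 << INIT_CACHE_SIZE_LOG2 slots up to 32 slots (log2Size == 5)
            // contributes (5 - INIT_CACHE_SIZE_LOG2) expansions.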
        }
    }

    {
        unsigned int cacheCountByType[2] = {0};
        unsigned int totalCacheCount = 0;
        unsigned int totalEntries = 0;
        unsigned int maxEntries = 0;
        unsigned int totalSlots = 0;
#ifdef OBJC_INSTRUMENTED
        unsigned int totalHitCount = 0;
        unsigned int totalHitProbes = 0;
        unsigned int maxHitProbes = 0;
        unsigned int totalMissCount = 0;
        unsigned int totalMissProbes = 0;
        unsigned int maxMissProbes = 0;
        unsigned int totalFlushCount = 0;
        unsigned int totalFlushedEntries = 0;
        unsigned int maxFlushedEntries = 0;
#endif

        totalChain = 0;
        maxChain = 0;
        totalMissChain = 0;
        maxMissChain = 0;

        // Sum information over all caches
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
                totalEntries += totalEntriesBySize[isMeta][index];
                totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
                totalChain += totalChainBySize[isMeta][index];
                if (maxEntriesBySize[isMeta][index] > maxEntries)
                    maxEntries = maxEntriesBySize[isMeta][index];
                if (maxChainBySize[isMeta][index] > maxChain)
                    maxChain = maxChainBySize[isMeta][index];
                totalMissChain += totalMissChainBySize[isMeta][index];
                if (maxMissChainBySize[isMeta][index] > maxMissChain)
                    maxMissChain = maxMissChainBySize[isMeta][index];
#ifdef OBJC_INSTRUMENTED
                totalHitCount += hitCountBySize[isMeta][index];
                totalHitProbes += hitProbesBySize[isMeta][index];
                if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
                    maxHitProbes = maxHitProbesBySize[isMeta][index];
                totalMissCount += missCountBySize[isMeta][index];
                totalMissProbes += missProbesBySize[isMeta][index];
                if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
                    maxMissProbes = maxMissProbesBySize[isMeta][index];
                totalFlushCount += flushCountBySize[isMeta][index];
                totalFlushedEntries += flushedEntriesBySize[isMeta][index];
                if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
                    maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
#endif
            }

            totalCacheCount += cacheCountByType[isMeta];
        }

        // Log our findings
        printf ("There are %u classes\n", classCount);

        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Number of this type of class
            printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
                    cacheCountByType[isMeta],
                    isMeta ? "class" : "instance");

            // Print header
            PrintCacheHeader ();

            // Keep format consistent even if there are no caches of this kind
            if (cacheCountByType[isMeta] == 0)
            {
                printf ("(none)\n");
                continue;
            }

            // Usage information by cache size
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                unsigned int cacheCount;
                unsigned int cacheSlotCount;
                unsigned int cacheEntryCount;

                // Get number of caches of this type and size
                cacheCount = cacheCountBySize[isMeta][index];
                if (cacheCount == 0)
                    continue;

                // Get the cache slot count and the total number of valid entries
                cacheSlotCount = (1 << index);
                cacheEntryCount = totalEntriesBySize[isMeta][index];
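                // Note the two denominators below: the average hit chain is
                // per valid entry, while the average miss chain is per slot
                // (cacheCount * cacheSlotCount), since a miss can hash to any
                // slot, occupied or not.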

                // Give the analysis
                PrintCacheInfo (cacheSlotCount,
                                cacheCount,
                                cacheEntryCount,
                                (float) cacheEntryCount / (float) cacheCount,
                                maxEntriesBySize[isMeta][index],
                                (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
                                maxChainBySize[isMeta][index],
                                (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
                                maxMissChainBySize[isMeta][index]
#ifdef OBJC_INSTRUMENTED
                                , hitCountBySize[isMeta][index],
                                hitCountBySize[isMeta][index] ?
                                    (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
                                maxHitProbesBySize[isMeta][index],
                                missCountBySize[isMeta][index],
                                missCountBySize[isMeta][index] ?
                                    (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
                                maxMissProbesBySize[isMeta][index],
                                flushCountBySize[isMeta][index],
                                flushCountBySize[isMeta][index] ?
                                    (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
                                maxFlushedEntriesBySize[isMeta][index]
#endif
                                );
            }
        }

        // Give overall numbers
        printf ("\nCumulative:\n");
        PrintCacheHeader ();
        PrintCacheInfo (totalSlots,
                        totalCacheCount,
                        totalEntries,
                        (float) totalEntries / (float) totalCacheCount,
                        maxEntries,
                        (float) totalChain / (float) totalEntries,
                        maxChain,
                        (float) totalMissChain / (float) totalSlots,
                        maxMissChain
#ifdef OBJC_INSTRUMENTED
                        , totalHitCount,
                        totalHitCount ?
                            (float) totalHitProbes / (float) totalHitCount : 0.0,
                        maxHitProbes,
                        totalMissCount,
                        totalMissCount ?
                            (float) totalMissProbes / (float) totalMissCount : 0.0,
                        maxMissProbes,
                        totalFlushCount,
                        totalFlushCount ?
                            (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
                        maxFlushedEntries
#endif
                        );

        printf ("\nNumber of \"forward::\" entries: %u\n", negativeEntryCount);
        printf ("Number of cache expansions: %u\n", cacheExpandCount);

#ifdef OBJC_INSTRUMENTED
        printf ("flush_caches:  total calls  total visits  average visits  max visits  total classes  visits/class\n");
        printf ("               -----------  ------------  --------------  ----------  -------------  ------------\n");
        printf ("    linear     %11u  %12u  %14.1f  %10u  %13u  %12.2f\n",
                LinearFlushCachesCount,
                LinearFlushCachesVisitedCount,
                LinearFlushCachesCount ?
                    (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
                MaxLinearFlushCachesVisitedCount,
                LinearFlushCachesVisitedCount,
                1.0);
        printf ("    nonlinear  %11u  %12u  %14.1f  %10u  %13u  %12.2f\n",
                NonlinearFlushCachesCount,
                NonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesCount ?
                    (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
                MaxNonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesClassCount,
                NonlinearFlushCachesClassCount ?
                    (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
        printf ("    ideal      %11u  %12u  %14.1f  %10u  %13u  %12.2f\n",
                LinearFlushCachesCount + NonlinearFlushCachesCount,
                IdealFlushCachesCount,
                LinearFlushCachesCount + NonlinearFlushCachesCount ?
                    (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
                MaxIdealFlushCachesCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
                    (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);

        PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
        PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
#endif

#if 0
        printf ("\nLookup chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (chainCount[index] != 0)
                printf (" %u:%u", index, chainCount[index]);
        }

        printf ("\nMiss chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (missChainCount[index] != 0)
                printf (" %u:%u", index, missChainCount[index]);
        }
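
        // The total below has three parts: each cache header (sizeof(struct
        // objc_cache) minus the one bucket pointer declared inline in the
        // struct, which is instead counted via totalSlots), the bucket
        // pointer arrays themselves, and the cache_entry allocated for each
        // "forward::" placeholder, which lives only in the cache rather than
        // in any method list.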
        printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
                totalCacheCount * (sizeof(struct objc_cache) - sizeof(cache_entry *)) +
                totalSlots * sizeof(cache_entry *) +
                negativeEntryCount * sizeof(cache_entry));
#endif
    }
}
#endif


void cache_init()
{
}

// !__OBJC2__
#endif