objc-cache-old.mm

/*
 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* objc-cache-old.mm
* Method cache management
* Cache flushing
* Cache garbage collection
* Cache instrumentation
* Dedicated allocator for large caches
**********************************************************************/

/***********************************************************************
* Method cache locking (GrP 2001-1-14)
*
* For speed, objc_msgSend does not acquire any locks when it reads
* method caches. Instead, all cache changes are performed so that any
* objc_msgSend running concurrently with the cache mutator will not
* crash or hang or get an incorrect result from the cache.
*
* When cache memory becomes unused (e.g. the old cache after cache
* expansion), it is not immediately freed, because a concurrent
* objc_msgSend could still be using it. Instead, the memory is
* disconnected from the data structures and placed on a garbage list.
* The memory is now only accessible to instances of objc_msgSend that
* were running when the memory was disconnected; any further calls to
* objc_msgSend will not see the garbage memory because the other data
* structures don't point to it anymore. The collecting_in_critical
* function checks the PC of all threads and returns FALSE when all threads
* are found to be outside objc_msgSend. This means any call to objc_msgSend
* that could have had access to the garbage has finished or moved past the
* cache lookup stage, so it is safe to free the memory.
*
* All functions that modify cache data or structures must acquire the
* cacheUpdateLock to prevent interference from concurrent modifications.
* The function that frees cache garbage must acquire the cacheUpdateLock
* and use collecting_in_critical() to flush out cache readers.
* The cacheUpdateLock is also used to protect the custom allocator used
* for large method cache blocks.
*
* Cache readers (PC-checked by collecting_in_critical())
* objc_msgSend*
* _cache_getImp
* _cache_getMethod
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
* _cache_fill         (acquires lock)
* _cache_expand       (only called from cache_fill)
* _cache_create       (only called from cache_expand)
* bcopy               (only called from instrumented cache_expand)
* flush_caches        (acquires lock)
* _cache_flush        (only called from cache_fill and flush_caches)
* _cache_collect_free (only called from cache_expand and cache_flush)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* _cache_print
* _class_printMethodCaches
* _class_printDuplicateCacheEntries
* _class_printMethodCacheStatistics
*
* _class_lookupMethodAndLoadCache is a special case. It may read a
* method triplet out of one cache and store it in another cache. This
* is unsafe if the method triplet is a forward:: entry, because the
* triplet itself could be freed unless _class_lookupMethodAndLoadCache
* were PC-checked or used a lock. Additionally, storing the method
* triplet in both caches would result in double-freeing if both caches
* were flushed or expanded. The solution is for _cache_getMethod to
* ignore all entries whose implementation is _objc_msgForward_impcache,
* so _class_lookupMethodAndLoadCache cannot look at a forward:: entry
* unsafely or place it in multiple caches.
***********************************************************************/
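/* Editorial illustration (not part of the original file; helper names
 * are hypothetical): a minimal sketch of the deferred-free protocol
 * described above. The writer unlinks memory first, then frees it only
 * once no thread's PC lies inside a cache-reading routine. */
#if 0
static void writer_replace_cache(Cache *slot, Cache newCache)
{
    Cache oldCache = *slot;
    *slot = newCache;                // readers now find only the new cache
    enqueue_garbage(oldCache);       // unreachable, but possibly still in use
    if (!any_thread_pc_in_reader())  // cf. _collecting_in_critical()
        free_garbage();              // safe: no reader can still see it
}
#endif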
#if !__OBJC2__

#include "objc-private.h"
#include "objc-cache-old.h"
#include "hashtable2.h"

typedef struct {
    SEL name;       // same layout as struct old_method
    void *unused;
    IMP imp;        // same layout as struct old_method
} cache_entry;
/* When _class_slow_grow is non-zero, any given cache is actually grown
 * only on the odd-numbered times it becomes full; on the even-numbered
 * times, it is simply emptied and re-used. When this flag is zero,
 * caches are grown every time. */
static const int _class_slow_grow = 1;

/* For min cache size: clear_cache=1, slow_grow=1
   For max cache size: clear_cache=0, slow_grow=0 */

/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
    INIT_CACHE_SIZE_LOG2 = 2,
    INIT_CACHE_SIZE      = (1 << INIT_CACHE_SIZE_LOG2)
};

/* Amount of space required for `count` hash table buckets, knowing that
 * one entry is embedded in the cache structure itself. */
#define TABLE_SIZE(count)  ((count - 1) * sizeof(cache_entry *))

#if !TARGET_OS_WIN32
#   define CACHE_ALLOCATOR
#endif

/* Custom cache allocator parameters.
 * CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM. */
#define CACHE_ALLOCATOR_MIN  512
#define CACHE_QUANTUM        (CACHE_ALLOCATOR_MIN+sizeof(struct objc_cache)-sizeof(cache_entry*))
#define CACHE_REGION_SIZE    ((128*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
// #define CACHE_REGION_SIZE ((256*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
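/* Worked example (editorial illustration, assuming a 64-bit build where
 * sizeof(struct objc_cache) == 16 and sizeof(cache_entry *) == 8):
 *   CACHE_QUANTUM     = 512 + 16 - 8          = 520 bytes
 *   CACHE_REGION_SIZE = (131072 / 520) * 520  = 131040 bytes
 * which matches the "quantum of 520" cited in the allocator comment
 * further below. */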
static uintptr_t cache_allocator_mask_for_size(size_t size)
{
    return (size - sizeof(struct objc_cache)) / sizeof(cache_entry *);
}

static size_t cache_allocator_size_for_mask(uintptr_t mask)
{
    size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
    size_t actual = CACHE_QUANTUM;
    while (actual < requested) actual += CACHE_QUANTUM;
    return actual;
}
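/* Round-trip example (editorial illustration, same 64-bit assumptions
 * as above): a 128-slot cache needs 16 + 127*8 = 1032 bytes, so
 * cache_allocator_mask_for_size(1032) == 127, and
 * cache_allocator_size_for_mask(127) rounds 1032 up to two quanta,
 * 1040 bytes - the "520, 1040, 2080, 4160" progression described in
 * the allocator comment below. */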
/* Cache instrumentation data. Immediately follows the cache block itself. */
#ifdef OBJC_INSTRUMENTED
typedef struct
{
    unsigned int hitCount;           // cache lookup success tally
    unsigned int hitProbes;          // sum entries checked to hit
    unsigned int maxHitProbes;       // max entries checked to hit
    unsigned int missCount;          // cache lookup no-find tally
    unsigned int missProbes;         // sum entries checked to miss
    unsigned int maxMissProbes;      // max entries checked to miss
    unsigned int flushCount;         // cache flush tally
    unsigned int flushedEntries;     // sum cache entries flushed
    unsigned int maxFlushedEntries;  // max cache entries flushed
} CacheInstrumentation;

#define CACHE_INSTRUMENTATION(cache)  (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
#endif

/* Cache filling and flushing instrumentation */
static int totalCacheFills = 0;
#ifdef OBJC_INSTRUMENTED
unsigned int LinearFlushCachesCount = 0;
unsigned int LinearFlushCachesVisitedCount = 0;
unsigned int MaxLinearFlushCachesVisitedCount = 0;
unsigned int NonlinearFlushCachesCount = 0;
unsigned int NonlinearFlushCachesClassCount = 0;
unsigned int NonlinearFlushCachesVisitedCount = 0;
unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
unsigned int IdealFlushCachesCount = 0;
unsigned int MaxIdealFlushCachesCount = 0;
#endif
/***********************************************************************
* A static empty cache. All classes initially point at this cache.
* When the first message is sent it misses in the cache, and when
* the cache is grown it checks for this case and uses malloc rather
* than realloc. This avoids the need to check for NULL caches in the
* messenger.
***********************************************************************/

struct objc_cache _objc_empty_cache =
{
    0,        // mask
    0,        // occupied
    { NULL }  // buckets
};
#ifdef OBJC_INSTRUMENTED
CacheInstrumentation emptyCacheInstrumentation = {0};
#endif
/* Local prototypes */
static bool _cache_isEmpty(Cache cache);
static Cache _cache_malloc(uintptr_t slotCount);
static Cache _cache_create(Class cls);
static Cache _cache_expand(Class cls);

static int _collecting_in_critical(void);
static void _garbage_make_room(void);
static void _cache_collect_free(void *data, size_t size);

#if defined(CACHE_ALLOCATOR)
static bool cache_allocator_is_block(void *block);
static Cache cache_allocator_calloc(size_t size);
static void cache_allocator_free(void *block);
#endif
/***********************************************************************
* Cache statistics for OBJC_PRINT_CACHE_SETUP
**********************************************************************/
static unsigned int cache_counts[16];
static size_t cache_allocations;
static size_t cache_collections;
static size_t cache_allocator_regions;

static size_t log2u(size_t x)
{
    unsigned int log;

    log = 0;
    while (x >>= 1)
        log += 1;

    return log;
}
/***********************************************************************
* _cache_isEmpty.
* Returns YES if the given cache is some empty cache.
* Empty caches should never be allocated on the heap.
**********************************************************************/
static bool _cache_isEmpty(Cache cache)
{
    return (cache == NULL  ||  cache == (Cache)&_objc_empty_cache  ||  cache->mask == 0);
}
/***********************************************************************
* _cache_malloc.
*
* Called from _cache_create() and cache_expand()
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static Cache _cache_malloc(uintptr_t slotCount)
{
    Cache new_cache;
    size_t size;

    cacheUpdateLock.assertLocked();

    // Allocate table (why not check for failure?)
    size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
#if defined(OBJC_INSTRUMENTED)
    // Custom cache allocator can't handle instrumentation.
    size += sizeof(CacheInstrumentation);
    new_cache = (Cache)calloc(size, 1);
    new_cache->mask = slotCount - 1;
#elif !defined(CACHE_ALLOCATOR)
    // fixme cache allocator implementation isn't 64-bit clean
    new_cache = (Cache)calloc(size, 1);
    new_cache->mask = (unsigned int)(slotCount - 1);
#else
    if (size < CACHE_ALLOCATOR_MIN) {
        new_cache = (Cache)calloc(size, 1);
        new_cache->mask = slotCount - 1;
        // occupied and buckets and instrumentation are all zero
    } else {
        new_cache = cache_allocator_calloc(size);
        // mask is already set
        // occupied and buckets and instrumentation are all zero
    }
#endif

    if (PrintCaches) {
        size_t bucket = log2u(slotCount);
        if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
            cache_counts[bucket]++;
        }
        cache_allocations++;
    }

    return new_cache;
}
/***********************************************************************
* _cache_free_block.
*
* Called from _cache_free() and _cache_collect_free().
* block may be a cache or a forward:: entry.
* If block is a cache, forward:: entries it points to will NOT be freed.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static inline int isPowerOf2(unsigned long l) { return 1 == __builtin_popcountl(l); }

static void _cache_free_block(void *block)
{
    cacheUpdateLock.assertLocked();

#if !TARGET_OS_WIN32
    if (PrintCaches) {
        Cache cache = (Cache)block;
        size_t slotCount = cache->mask + 1;
        if (isPowerOf2(slotCount)) {
            size_t bucket = log2u(slotCount);
            if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
                cache_counts[bucket]--;
            }
        }
    }
#endif

#if defined(CACHE_ALLOCATOR)
    if (cache_allocator_is_block(block)) {
        cache_allocator_free(block);
    } else
#endif
    {
        free(block);
    }
}
/***********************************************************************
* _cache_free.
*
* Called from _objc_remove_classes_in_image().
* forward:: entries in the cache ARE freed.
* Cache locks: cacheUpdateLock must NOT be held by the caller.
**********************************************************************/
void _cache_free(Cache cache)
{
    unsigned int i;

    mutex_locker_t lock(cacheUpdateLock);

    for (i = 0; i < cache->mask + 1; i++) {
        cache_entry *entry = (cache_entry *)cache->buckets[i];
        if (entry  &&  entry->imp == _objc_msgForward_impcache) {
            _cache_free_block(entry);
        }
    }

    _cache_free_block(cache);
}
/***********************************************************************
* _cache_create.
*
* Called from _cache_expand().
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static Cache _cache_create(Class cls)
{
    Cache new_cache;

    cacheUpdateLock.assertLocked();

    // Allocate new cache block
    new_cache = _cache_malloc(INIT_CACHE_SIZE);

    // Install the cache
    cls->cache = new_cache;

    // Clear the grow flag so that we will re-use the current storage,
    // rather than actually grow the cache, when expanding the cache
    // for the first time
    if (_class_slow_grow) {
        cls->setShouldGrowCache(false);
    }

    // Return our creation
    return new_cache;
}
/***********************************************************************
* _cache_expand.
*
* Called from _cache_fill().
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static Cache _cache_expand(Class cls)
{
    Cache old_cache;
    Cache new_cache;
    uintptr_t slotCount;
    uintptr_t index;

    cacheUpdateLock.assertLocked();

    // First growth goes from empty cache to a real one
    old_cache = cls->cache;
    if (_cache_isEmpty(old_cache))
        return _cache_create(cls);

    if (_class_slow_grow) {
        // Cache grows every other time only.
        if (cls->shouldGrowCache()) {
            // Grow the cache this time. Don't grow next time.
            cls->setShouldGrowCache(false);
        }
        else {
            // Reuse the current cache storage this time. Do grow next time.
            cls->setShouldGrowCache(true);

            // Clear the valid-entry counter
            old_cache->occupied = 0;

            // Invalidate all the cache entries
            for (index = 0; index < old_cache->mask + 1; index += 1)
            {
                // Remember what this entry was, so we can possibly
                // deallocate it after the bucket has been invalidated
                cache_entry *oldEntry = (cache_entry *)old_cache->buckets[index];

                // Skip invalid entry
                if (!oldEntry)
                    continue;

                // Invalidate this entry
                old_cache->buckets[index] = NULL;

                // Deallocate "forward::" entry
                if (oldEntry->imp == _objc_msgForward_impcache) {
                    _cache_collect_free(oldEntry, sizeof(cache_entry));
                }
            }

            // Return the same old cache, freshly emptied
            return old_cache;
        }
    }

    // Double the cache size
    slotCount = (old_cache->mask + 1) << 1;

    new_cache = _cache_malloc(slotCount);

#ifdef OBJC_INSTRUMENTED
    // Propagate the instrumentation data
    {
        CacheInstrumentation *oldCacheData;
        CacheInstrumentation *newCacheData;

        oldCacheData = CACHE_INSTRUMENTATION(old_cache);
        newCacheData = CACHE_INSTRUMENTATION(new_cache);
        bcopy((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
    }
#endif

    // Deallocate "forward::" entries from the old cache
    for (index = 0; index < old_cache->mask + 1; index++) {
        cache_entry *entry = (cache_entry *)old_cache->buckets[index];
        if (entry && entry->imp == _objc_msgForward_impcache) {
            _cache_collect_free(entry, sizeof(cache_entry));
        }
    }

    // Install new cache
    cls->cache = new_cache;

    // Deallocate old cache, try freeing all the garbage
    _cache_collect_free(old_cache, old_cache->mask * sizeof(cache_entry *));
    _cache_collect(false);

    return new_cache;
}
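/* Slow-grow behavior in practice (editorial illustration): with
 * _class_slow_grow set, a cache that keeps filling alternates between
 * being emptied in place and being doubled, so its slot count over
 * successive overflows runs 4, 4, 8, 8, 16, ... instead of
 * 4, 8, 16, 32, ... This slows peak memory growth at the cost of
 * refilling recently evicted entries. */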
/***********************************************************************
* _cache_fill. Add the specified method to the specified class' cache.
* Returns NO if the cache entry wasn't added: cache was busy,
* class is still being initialized, new entry is a duplicate.
*
* Called only from _class_lookupMethodAndLoadCache and
* class_respondsToMethod and _cache_addForwardEntry.
*
* Cache locks: cacheUpdateLock must not be held.
**********************************************************************/
bool _cache_fill(Class cls, Method smt, SEL sel)
{
    uintptr_t newOccupied;
    uintptr_t index;
    cache_entry **buckets;
    cache_entry *entry;
    Cache cache;

    cacheUpdateLock.assertUnlocked();

    // Never cache before +initialize is done
    if (!cls->isInitialized()) {
        return NO;
    }

    // Keep tally of cache additions
    totalCacheFills += 1;

    mutex_locker_t lock(cacheUpdateLock);

    entry = (cache_entry *)smt;

    cache = cls->cache;

    // Make sure the entry wasn't added to the cache by some other thread
    // before we grabbed the cacheUpdateLock.
    // Don't use _cache_getMethod() because _cache_getMethod() doesn't
    // return forward:: entries.
    if (_cache_getImp(cls, sel)) {
        return NO;  // entry is already cached, didn't add new one
    }

    // Use the cache as-is if it is less than 3/4 full
    newOccupied = cache->occupied + 1;
    if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
        // Cache is less than 3/4 full.
        cache->occupied = (unsigned int)newOccupied;
    } else {
        // Cache is too full. Expand it.
        cache = _cache_expand(cls);

        // Account for the addition
        cache->occupied += 1;
    }

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    buckets = (cache_entry **)cache->buckets;
    for (index = CACHE_HASH(sel, cache->mask);
         buckets[index] != NULL;
         index = (index+1) & cache->mask)
    {
        // empty
    }
    buckets[index] = entry;

    return YES;  // successfully added new cache entry
}
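/* Worked example of the 3/4-full check above (editorial illustration):
 * with mask == 3 (4 slots) and occupied == 2, newOccupied == 3 and
 * 3*4 <= 4*3 holds, so the third entry goes into the existing table;
 * the next fill gives 4*4 > 4*3 and triggers _cache_expand. The probe
 * loop then walks (hash, hash+1, ...) & mask until it finds a NULL
 * bucket, which must exist because the table never exceeds 3/4 full. */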
/***********************************************************************
* _cache_addForwardEntry
* Add a forward:: entry for the given selector to cls's method cache.
* Does nothing if the cache addition fails for any reason.
* Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
* Cache locks: cacheUpdateLock must not be held.
**********************************************************************/
void _cache_addForwardEntry(Class cls, SEL sel)
{
    cache_entry *smt;

    smt = (cache_entry *)malloc(sizeof(cache_entry));
    smt->name = sel;
    smt->imp = _objc_msgForward_impcache;
    if (! _cache_fill(cls, (Method)smt, sel)) {  // fixme hack
        // Entry not added to cache. Don't leak the method struct.
        free(smt);
    }
}
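/* Lifecycle note (editorial illustration): unlike ordinary cache
 * entries, which point into a class's method lists, a forward:: entry
 * is a heap-allocated cache_entry owned by the cache itself. That is
 * why _cache_flush and _cache_expand hand such entries to
 * _cache_collect_free, and why _cache_getMethod must hide them from
 * _class_lookupMethodAndLoadCache (see the locking comment at the top
 * of this file). */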
/***********************************************************************
* _cache_flush. Invalidate all valid entries in the given class' cache.
*
* Called from flush_caches() and _cache_fill()
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void _cache_flush(Class cls)
{
    Cache cache;
    unsigned int index;

    cacheUpdateLock.assertLocked();

    // Locate cache. Ignore unused cache.
    cache = cls->cache;
    if (_cache_isEmpty(cache)) return;

#ifdef OBJC_INSTRUMENTED
    {
        CacheInstrumentation *cacheData;

        // Tally this flush
        cacheData = CACHE_INSTRUMENTATION(cache);
        cacheData->flushCount += 1;
        cacheData->flushedEntries += cache->occupied;
        if (cache->occupied > cacheData->maxFlushedEntries)
            cacheData->maxFlushedEntries = cache->occupied;
    }
#endif

    // Traverse the cache
    for (index = 0; index <= cache->mask; index += 1)
    {
        // Remember what this entry was, so we can possibly
        // deallocate it after the bucket has been invalidated
        cache_entry *oldEntry = (cache_entry *)cache->buckets[index];

        // Invalidate this entry
        cache->buckets[index] = NULL;

        // Deallocate "forward::" entry
        if (oldEntry  &&  oldEntry->imp == _objc_msgForward_impcache)
            _cache_collect_free(oldEntry, sizeof(cache_entry));
    }

    // Clear the valid-entry counter
    cache->occupied = 0;
}
/***********************************************************************
* flush_cache. Flushes the instance method cache for class cls only.
* Use flush_caches() if cls might have in-use subclasses.
**********************************************************************/
void flush_cache(Class cls)
{
    if (cls) {
        mutex_locker_t lock(cacheUpdateLock);
        _cache_flush(cls);
    }
}
/***********************************************************************
* cache collection.
**********************************************************************/

#if !TARGET_OS_WIN32

// A sentinel (magic value) to report bad thread_get_state status.
// Must not be a valid PC.
// Must not be zero - thread_get_state() on a new thread returns PC == 0.
#define PC_SENTINEL  1

// UNIX03 compliance hack (4508809)
#if !__DARWIN_UNIX03
#define __srr0 srr0
#define __eip eip
#endif

static uintptr_t _get_pc_for_thread(thread_t thread)
#if defined(__i386__)
{
    i386_thread_state_t state;
    unsigned int count = i386_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
}
#elif defined(__x86_64__)
{
    x86_thread_state64_t state;
    unsigned int count = x86_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
}
#elif defined(__arm__)
{
    arm_thread_state_t state;
    unsigned int count = ARM_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
}
#else
{
#error _get_pc_for_thread() not implemented for this architecture
}
#endif

#endif
/***********************************************************************
* _collecting_in_critical.
* Returns TRUE if some thread is currently executing a cache-reading
* function. Collection of cache garbage is not allowed when a cache-
* reading function is in progress because it might still be using
* the garbage memory.
**********************************************************************/
extern "C" uintptr_t objc_entryPoints[];
extern "C" uintptr_t objc_exitPoints[];

static int _collecting_in_critical(void)
{
#if TARGET_OS_WIN32
    return TRUE;
#else
    thread_act_port_array_t threads;
    unsigned number;
    unsigned count;
    kern_return_t ret;
    int result;

    mach_port_t mythread = pthread_mach_thread_np(pthread_self());

    // Get a list of all the threads in the current task
    ret = task_threads(mach_task_self(), &threads, &number);
    if (ret != KERN_SUCCESS)
    {
        _objc_fatal("task_threads failed (result %d)\n", ret);
    }

    // Check whether any thread is in the cache lookup code
    result = FALSE;
    for (count = 0; count < number; count++)
    {
        int region;
        uintptr_t pc;

        // Don't bother checking ourselves
        if (threads[count] == mythread)
            continue;

        // Find out where thread is executing
        pc = _get_pc_for_thread(threads[count]);

        // Check for bad status, and if so, assume the worst (can't collect)
        if (pc == PC_SENTINEL)
        {
            result = TRUE;
            goto done;
        }

        // Check whether it is in the cache lookup code
        for (region = 0; objc_entryPoints[region] != 0; region++)
        {
            if ((pc >= objc_entryPoints[region]) &&
                (pc <= objc_exitPoints[region]))
            {
                result = TRUE;
                goto done;
            }
        }
    }

 done:
    // Deallocate the port rights for the threads
    for (count = 0; count < number; count++) {
        mach_port_deallocate(mach_task_self(), threads[count]);
    }

    // Deallocate the thread list
    vm_deallocate(mach_task_self(), (vm_address_t) threads, sizeof(threads[0]) * number);

    // Return our finding
    return result;
#endif
}
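/* A condensed restatement (editorial illustration only) of the PC
 * classification above: a PC counts as "in critical" when it falls
 * inside any [entry, exit] range exported by the assembly messengers. */
#if 0
static int pc_is_in_critical(uintptr_t pc)
{
    for (int region = 0; objc_entryPoints[region] != 0; region++) {
        if (pc >= objc_entryPoints[region]  &&  pc <= objc_exitPoints[region])
            return 1;
    }
    return 0;
}
#endif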
/***********************************************************************
* _garbage_make_room. Ensure that there is enough room for at least
* one more ref in the garbage.
**********************************************************************/

// amount of memory represented by all refs in the garbage
static size_t garbage_byte_size = 0;

// do not empty the garbage until garbage_byte_size gets at least this big
static size_t garbage_threshold = 1024;

// table of refs to free
static void **garbage_refs = 0;

// current number of refs in garbage_refs
static size_t garbage_count = 0;

// capacity of current garbage_refs
static size_t garbage_max = 0;

// capacity of initial garbage_refs
enum {
    INIT_GARBAGE_COUNT = 128
};

static void _garbage_make_room(void)
{
    static int first = 1;

    // Create the collection table the first time it is needed
    if (first)
    {
        first = 0;
        garbage_refs = (void**)
            malloc(INIT_GARBAGE_COUNT * sizeof(void *));
        garbage_max = INIT_GARBAGE_COUNT;
    }

    // Double the table if it is full
    else if (garbage_count == garbage_max)
    {
        garbage_refs = (void**)
            realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
        garbage_max *= 2;
    }
}
/***********************************************************************
* _cache_collect_free. Add the specified malloc'd memory to the list
* of blocks to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void _cache_collect_free(void *data, size_t size)
{
    cacheUpdateLock.assertLocked();

    _garbage_make_room();
    garbage_byte_size += size;
    garbage_refs[garbage_count++] = data;
}
/***********************************************************************
* _cache_collect. Try to free accumulated dead caches.
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void _cache_collect(bool collectALot)
{
    cacheUpdateLock.assertLocked();

    // Done if the garbage is not full
    if (garbage_byte_size < garbage_threshold  &&  !collectALot) {
        return;
    }

    // Synchronize collection with objc_msgSend and other cache readers
    if (!collectALot) {
        if (_collecting_in_critical()) {
            // objc_msgSend (or other cache reader) is currently looking in
            // the cache and might still be using some garbage.
            if (PrintCaches) {
                _objc_inform("CACHES: not collecting; "
                             "objc_msgSend in progress");
            }
            return;
        }
    }
    else {
        // No excuses.
        while (_collecting_in_critical())
            ;
    }

    // No cache readers in progress - garbage is now deletable

    // Log our progress
    if (PrintCaches) {
        cache_collections++;
        _objc_inform("CACHES: COLLECTING %zu bytes (%zu regions, %zu allocations, %zu collections)", garbage_byte_size, cache_allocator_regions, cache_allocations, cache_collections);
    }

    // Dispose all refs now in the garbage
    while (garbage_count--) {
        _cache_free_block(garbage_refs[garbage_count]);
    }

    // Clear the garbage count and total size indicator
    garbage_count = 0;
    garbage_byte_size = 0;

    if (PrintCaches) {
        size_t i;
        size_t total = 0;
        size_t ideal_total = 0;
        size_t malloc_total = 0;
        size_t local_total = 0;

        for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
            int count = cache_counts[i];
            int slots = 1 << i;
            size_t size = sizeof(struct objc_cache) + TABLE_SIZE(slots);
            size_t ideal = size;
#if TARGET_OS_WIN32
            size_t malloc = size;
#else
            size_t malloc = malloc_good_size(size);
#endif
            size_t local = size < CACHE_ALLOCATOR_MIN ? malloc : cache_allocator_size_for_mask(cache_allocator_mask_for_size(size));

            if (!count) continue;

            _objc_inform("CACHES: %4d slots: %4d caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", slots, count, ideal*count, malloc*count, local*count, malloc*count-ideal*count, local*count-ideal*count);

            total += count;
            ideal_total += ideal*count;
            malloc_total += malloc*count;
            local_total += local*count;
        }

        _objc_inform("CACHES: total: %4zu caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", total, ideal_total, malloc_total, local_total, malloc_total-ideal_total, local_total-ideal_total);
    }
}
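/* Threshold arithmetic (editorial illustration, 64-bit assumptions):
 * a discarded 32-slot cache is reported to _cache_collect_free as
 * mask * sizeof(cache_entry *) = 31 * 8 = 248 bytes, so roughly five
 * such caches push garbage_byte_size past the 1024-byte
 * garbage_threshold and make the next _cache_collect(false) attempt a
 * real collection. */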
#if defined(CACHE_ALLOCATOR)

/***********************************************************************
* Custom method cache allocator.
* Method cache block sizes are 2^slots+2 words, which is a pessimal
* case for the system allocator. It wastes 504 bytes per cache block
* with 128 or more slots, which adds up to tens of KB for an AppKit process.
* To save memory, the custom cache allocator below is used.
*
* The cache allocator uses 128 KB allocation regions. Few processes will
* require a second region. Within a region, allocation is address-ordered
* first fit.
*
* The cache allocator uses a quantum of 520.
* Cache block ideal sizes: 520, 1032, 2056, 4104
* Cache allocator sizes:   520, 1040, 2080, 4160
*
* Because all blocks are known to be genuine method caches, the ordinary
* cache->mask and cache->occupied fields are used as block headers.
* No out-of-band headers are maintained. The number of blocks will
* almost always be fewer than 200, so for simplicity there is a single
* address-ordered free list and no further optimization.
*
* Block in use: mask != 0, occupied != -1 (mask indicates block size)
* Block free:   mask != 0, occupied == -1 (mask is precisely block size)
*
* No cache allocator functions take any locks. Instead, the caller
* must hold the cacheUpdateLock.
*
* fixme with 128 KB regions and 520 B min block size, an allocation
* bitmap would be only 32 bytes - better than free list?
**********************************************************************/
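/* Sizing example (editorial illustration, using the figures above):
 * a 128-slot cache ideally needs 1032 bytes. Per the note above, the
 * system allocator wastes 504 bytes on such a block (returning 1536);
 * this allocator hands back two quanta, 1040 bytes, wasting only 8. */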
typedef struct cache_allocator_block {
    uintptr_t size;
    uintptr_t state;
    struct cache_allocator_block *nextFree;
} cache_allocator_block;

typedef struct cache_allocator_region {
    cache_allocator_block *start;
    cache_allocator_block *end;    // first non-block address
    cache_allocator_block *freeList;
    struct cache_allocator_region *next;
} cache_allocator_region;

static cache_allocator_region *cacheRegion = NULL;
/***********************************************************************
* cache_allocator_add_region
* Allocates and returns a new region that can hold at least size
* bytes of large method caches.
* The actual size will be rounded up to a CACHE_QUANTUM boundary,
* with a minimum of CACHE_REGION_SIZE.
* The new region is lowest-priority for new allocations. Callers that
* know the other regions are already full should allocate directly
* into the returned region.
**********************************************************************/
static cache_allocator_region *cache_allocator_add_region(size_t size)
{
    vm_address_t addr;
    cache_allocator_block *b;
    cache_allocator_region **rgnP;
    cache_allocator_region *newRegion = (cache_allocator_region *)
        calloc(1, sizeof(cache_allocator_region));

    // Round size up to quantum boundary, and apply the minimum size.
    size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
    if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;

    // Allocate the region
    addr = (vm_address_t)calloc(size, 1);
    newRegion->start = (cache_allocator_block *)addr;
    newRegion->end = (cache_allocator_block *)(addr + size);

    // Mark the first block: free and covers the entire region
    b = newRegion->start;
    b->size = size;
    b->state = (uintptr_t)-1;
    b->nextFree = NULL;
    newRegion->freeList = b;

    // Add to end of the linked list of regions.
    // Other regions should be re-used before this one is touched.
    newRegion->next = NULL;
    rgnP = &cacheRegion;
    while (*rgnP) {
        rgnP = &(**rgnP).next;
    }
    *rgnP = newRegion;

    cache_allocator_regions++;

    return newRegion;
}
/***********************************************************************
* cache_allocator_coalesce
* Attempts to coalesce a free block with the single free block following
* it in the free list, if any.
**********************************************************************/
static void cache_allocator_coalesce(cache_allocator_block *block)
{
    if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
        block->size += block->nextFree->size;
        block->nextFree = block->nextFree->nextFree;
    }
}
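/* Coalescing example (editorial illustration): if a free 520-byte block
 * at address A is followed on the free list by a free block starting
 * exactly at A+520, the two merge into one 1040-byte block. If an
 * allocated block sits between them, the addresses don't abut and no
 * merge occurs; because the free list is address-ordered, checking
 * only the single next block is sufficient. */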
/***********************************************************************
* cache_region_calloc
* Attempt to allocate a size-byte block in the given region.
* Allocation is first-fit. The free list is already fully coalesced.
* Returns NULL if there is not enough room in the region for the block.
**********************************************************************/
static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
{
    cache_allocator_block **blockP;
    uintptr_t mask;

    // Save mask for allocated block, then round size
    // up to CACHE_QUANTUM boundary
    mask = cache_allocator_mask_for_size(size);
    size = cache_allocator_size_for_mask(mask);

    // Search the free list for a sufficiently large free block.
    for (blockP = &rgn->freeList;
         *blockP != NULL;
         blockP = &(**blockP).nextFree)
    {
        cache_allocator_block *block = *blockP;
        if (block->size < size) continue;  // not big enough

        // block is now big enough. Allocate from it.

        // Slice off unneeded fragment of block, if any,
        // and reconnect the free list around block.
        if (block->size - size >= CACHE_QUANTUM) {
            cache_allocator_block *leftover =
                (cache_allocator_block *)(size + (uintptr_t)block);
            leftover->size = block->size - size;
            leftover->state = (uintptr_t)-1;
            leftover->nextFree = block->nextFree;
            *blockP = leftover;
        } else {
            *blockP = block->nextFree;
        }

        // block is now exactly the right size.
        bzero(block, size);
        block->size = mask;  // Cache->mask
        block->state = 0;    // Cache->occupied

        return block;
    }

    // No room in this region.
    return NULL;
}
/***********************************************************************
* cache_allocator_calloc
* Custom allocator for large method caches (128+ slots)
* The returned cache block already has cache->mask set.
* cache->occupied and the cache contents are zero.
* Cache locks: cacheUpdateLock must be held by the caller
**********************************************************************/
static Cache cache_allocator_calloc(size_t size)
{
    cache_allocator_region *rgn;

    cacheUpdateLock.assertLocked();

    for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
        void *p = cache_region_calloc(rgn, size);
        if (p) {
            return (Cache)p;
        }
    }

    // No regions or all regions full - make a region and try one more time
    // In the unlikely case of a cache over 256KB, it will get its own region.
    return (Cache)cache_region_calloc(cache_allocator_add_region(size), size);
}
/***********************************************************************
* cache_allocator_region_for_block
* Returns the cache allocator region that block points into, or NULL.
**********************************************************************/
static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
{
    cache_allocator_region *rgn;
    for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
        if (block >= rgn->start  &&  block < rgn->end) return rgn;
    }
    return NULL;
}

/***********************************************************************
* cache_allocator_is_block
* If ptr is a live block from the cache allocator, return YES.
* If ptr is a block from some other allocator, return NO.
* If ptr is a dead block from the cache allocator, the result is undefined.
* Cache locks: cacheUpdateLock must be held by the caller
**********************************************************************/
static bool cache_allocator_is_block(void *ptr)
{
    cacheUpdateLock.assertLocked();
    return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
}
/***********************************************************************
* cache_allocator_free
* Frees a block allocated by the cache allocator.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void cache_allocator_free(void *ptr)
{
    cache_allocator_block *dead = (cache_allocator_block *)ptr;
    cache_allocator_block *cur;
    cache_allocator_region *rgn;

    cacheUpdateLock.assertLocked();

    if (! (rgn = cache_allocator_region_for_block(dead))) {
        // free of non-pointer
        _objc_inform("cache_allocator_free of non-pointer %p", dead);
        return;
    }

    dead->size = cache_allocator_size_for_mask(dead->size);
    dead->state = (uintptr_t)-1;

    if (!rgn->freeList  ||  rgn->freeList > dead) {
        // dead block belongs at front of free list
        dead->nextFree = rgn->freeList;
        rgn->freeList = dead;
        cache_allocator_coalesce(dead);
        return;
    }

    // dead block belongs in the middle or end of free list
    for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
        cache_allocator_block *ahead = cur->nextFree;

        if (!ahead  ||  ahead > dead) {
            // cur and ahead straddle dead, OR dead belongs at end of free list
            cur->nextFree = dead;
            dead->nextFree = ahead;

            // coalesce into dead first in case both succeed
            cache_allocator_coalesce(dead);
            cache_allocator_coalesce(cur);
            return;
        }
    }

    // uh-oh
    _objc_inform("cache_allocator_free of non-pointer %p", ptr);
}

// defined(CACHE_ALLOCATOR)
#endif
/***********************************************************************
* Cache instrumentation and debugging
**********************************************************************/

#ifdef OBJC_INSTRUMENTED
enum {
    CACHE_HISTOGRAM_SIZE = 512
};

unsigned int CacheHitHistogram[CACHE_HISTOGRAM_SIZE];
unsigned int CacheMissHistogram[CACHE_HISTOGRAM_SIZE];
#endif
/***********************************************************************
* _cache_print.
**********************************************************************/
static void _cache_print(Cache cache)
{
    uintptr_t index;
    uintptr_t count;

    count = cache->mask + 1;
    for (index = 0; index < count; index += 1) {
        cache_entry *entry = (cache_entry *)cache->buckets[index];
        if (entry) {
            if (entry->imp == _objc_msgForward_impcache)
                printf("does not recognize: \n");
            printf("%s\n", sel_getName(entry->name));
        }
    }
}
/***********************************************************************
* _class_printMethodCaches.
**********************************************************************/
void _class_printMethodCaches(Class cls)
{
    if (_cache_isEmpty(cls->cache)) {
        printf("no instance-method cache for class %s\n", cls->nameForLogging());
    } else {
        printf("instance-method cache for class %s:\n", cls->nameForLogging());
        _cache_print(cls->cache);
    }

    if (_cache_isEmpty(cls->ISA()->cache)) {
        printf("no class-method cache for class %s\n", cls->nameForLogging());
    } else {
        printf("class-method cache for class %s:\n", cls->nameForLogging());
        _cache_print(cls->ISA()->cache);
    }
}
#if 0
#warning fixme

/***********************************************************************
* _class_printDuplicateCacheEntries.
**********************************************************************/
void _class_printDuplicateCacheEntries(bool detail)
{
    NXHashState state;
    Class cls;
    unsigned int duplicates;
    unsigned int index1;
    unsigned int index2;
    unsigned int mask;
    unsigned int count;
    unsigned int isMeta;
    Cache cache;

    printf("Checking for duplicate cache entries \n");

    // Outermost loop - iterate over all classes
    state = NXInitHashState(class_hash);
    duplicates = 0;
    while (NXNextHashState(class_hash, &state, (void **) &cls))
    {
        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Select cache of interest and make sure it exists
            cache = (isMeta ? cls->ISA : cls)->cache;
            if (_cache_isEmpty(cache))
                continue;

            // Middle loop - check each entry in the given cache
            mask = cache->mask;
            count = mask + 1;
            for (index1 = 0; index1 < count; index1 += 1)
            {
                // Skip invalid entry
                if (!cache->buckets[index1])
                    continue;

                // Inner loop - check that given entry matches no later entry
                for (index2 = index1 + 1; index2 < count; index2 += 1)
                {
                    // Skip invalid entry
                    if (!cache->buckets[index2])
                        continue;

                    // Check for duplication by method name comparison
                    if (strcmp((char *) cache->buckets[index1]->name,
                               (char *) cache->buckets[index2]->name) == 0)
                    {
                        if (detail)
                            printf("%s %s\n", cls->nameForLogging(), sel_getName(cache->buckets[index1]->name));
                        duplicates += 1;
                        break;
                    }
                }
            }
        }
    }

    // Log the findings
    printf("duplicates = %d\n", duplicates);
    printf("total cache fills = %d\n", totalCacheFills);
}
/***********************************************************************
* PrintCacheHeader.
**********************************************************************/
static void PrintCacheHeader(void)
{
#ifdef OBJC_INSTRUMENTED
    printf("Cache  Cache  Slots  Avg    Max   AvgS  MaxS  AvgS  MaxS  TotalD   AvgD  MaxD  TotalD   AvgD  MaxD  TotD  AvgD  MaxD\n");
    printf("Size   Count  Used   Used   Used  Hit   Hit   Miss  Miss  Hits     Prbs  Prbs  Misses   Prbs  Prbs  Flsh  Flsh  Flsh\n");
    printf("-----  -----  -----  -----  ----  ----  ----  ----  ----  -------  ----  ----  -------  ----  ----  ----  ----  ----\n");
#else
    printf("Cache  Cache  Slots  Avg    Max   AvgS  MaxS  AvgS  MaxS\n");
    printf("Size   Count  Used   Used   Used  Hit   Hit   Miss  Miss\n");
    printf("-----  -----  -----  -----  ----  ----  ----  ----  ----\n");
#endif
}

/***********************************************************************
* PrintCacheInfo.
**********************************************************************/
static void PrintCacheInfo(unsigned int cacheSize,
                           unsigned int cacheCount,
                           unsigned int slotsUsed,
                           float avgUsed, unsigned int maxUsed,
                           float avgSHit, unsigned int maxSHit,
                           float avgSMiss, unsigned int maxSMiss
#ifdef OBJC_INSTRUMENTED
                         , unsigned int totDHits,
                           float avgDHit,
                           unsigned int maxDHit,
                           unsigned int totDMisses,
                           float avgDMiss,
                           unsigned int maxDMiss,
                           unsigned int totDFlsh,
                           float avgDFlsh,
                           unsigned int maxDFlsh
#endif
                           )
{
#ifdef OBJC_INSTRUMENTED
    printf("%5u  %5u  %5u  %5.1f  %4u  %4.1f  %4u  %4.1f  %4u  %7u  %4.1f  %4u  %7u  %4.1f  %4u  %4u  %4.1f  %4u\n",
#else
    printf("%5u  %5u  %5u  %5.1f  %4u  %4.1f  %4u  %4.1f  %4u\n",
#endif
           cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss
#ifdef OBJC_INSTRUMENTED
         , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh
#endif
           );
}
#ifdef OBJC_INSTRUMENTED
/***********************************************************************
* PrintCacheHistogram. Show the non-zero entries from the specified
* cache histogram.
**********************************************************************/
static void PrintCacheHistogram(char *title,
                                unsigned int *firstEntry,
                                unsigned int entryCount)
{
    unsigned int index;
    unsigned int *thisEntry;

    printf("%s\n", title);
    printf("    Probes    Tally\n");
    printf("    ------    -----\n");
    for (index = 0, thisEntry = firstEntry;
         index < entryCount;
         index += 1, thisEntry += 1)
    {
        if (*thisEntry == 0)
            continue;

        printf("    %6d    %5d\n", index, *thisEntry);
    }
}
#endif

/***********************************************************************
* _class_printMethodCacheStatistics.
**********************************************************************/

#define MAX_LOG2_SIZE   32
#define MAX_CHAIN_SIZE  100

void _class_printMethodCacheStatistics(void)
{
    unsigned int isMeta;
    unsigned int index;
    NXHashState state;
    Class cls;
    unsigned int totalChain;
    unsigned int totalMissChain;
    unsigned int maxChain;
    unsigned int maxMissChain;
    unsigned int classCount;
    unsigned int negativeEntryCount;
    unsigned int cacheExpandCount;
    unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int chainCount[MAX_CHAIN_SIZE] = {0};
    unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};
#ifdef OBJC_INSTRUMENTED
    unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
    unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
#endif

    printf ("Printing cache statistics\n");

    // Outermost loop - iterate over all classes
    state = NXInitHashState (class_hash);
    classCount = 0;
    negativeEntryCount = 0;
    cacheExpandCount = 0;
    while (NXNextHashState (class_hash, &state, (void **) &cls))
    {
        // Tally classes
        classCount += 1;

        // Control loop - do given class' cache, then its isa's cache
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            Cache cache;
            unsigned int mask;
            unsigned int log2Size;
            unsigned int entryCount;

            // Select cache of interest
            cache = (isMeta ? cls->ISA : cls)->cache;

            // Ignore empty cache... should we?
            if (_cache_isEmpty(cache))
                continue;

            // Middle loop - do each entry in the given cache
            mask = cache->mask;
            entryCount = 0;
            totalChain = 0;
            totalMissChain = 0;
            maxChain = 0;
            maxMissChain = 0;
            for (index = 0; index < mask + 1; index += 1)
            {
                cache_entry **buckets;
                cache_entry *entry;
                unsigned int hash;
                unsigned int methodChain;
                unsigned int methodMissChain;
                unsigned int index2;

                // If entry is invalid, the only item of
                // interest is that future insert hashes
                // to this entry can use it directly.
                buckets = (cache_entry **)cache->buckets;
                if (!buckets[index])
                {
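                    // An empty slot means a lookup that hashes here misses
                    // immediately, so it tallies as a zero-length miss chain.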
                    missChainCount[0] += 1;
                    continue;
                }

                entry = buckets[index];

                // Tally valid entries
                entryCount += 1;

                // Tally "forward::" entries
                if (entry->imp == _objc_msgForward_impcache)
                    negativeEntryCount += 1;

                // Calculate search distance (chain length) for this method
                // The chain may wrap around to the beginning of the table.
                hash = CACHE_HASH(entry->name, mask);
                if (index >= hash) methodChain = index - hash;
                else methodChain = (mask+1) + index - hash;
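                // (Worked example, assuming an 8-slot table: mask == 7, so
                // an entry found at index 1 whose selector hashes to slot 6
                // has methodChain == (7+1) + 1 - 6 == 3.)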

                // Tally chains of this length
                if (methodChain < MAX_CHAIN_SIZE)
                    chainCount[methodChain] += 1;

                // Keep sum of all chain lengths
                totalChain += methodChain;

                // Record greatest chain length
                if (methodChain > maxChain)
                    maxChain = methodChain;

                // Calculate search distance for miss that hashes here
                index2 = index;
                while (buckets[index2])
                {
                    index2 += 1;
                    index2 &= mask;
                }
                methodMissChain = ((index2 - index) & mask);
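                // (Masking the unsigned difference handles wrap-around:
                // with mask == 7, index == 6, and the first empty slot at
                // index2 == 1, the miss chain is (1 - 6) & 7 == 3.)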

                // Tally miss chains of this length
                if (methodMissChain < MAX_CHAIN_SIZE)
                    missChainCount[methodMissChain] += 1;

                // Keep sum of all miss chain lengths in this class
                totalMissChain += methodMissChain;

                // Record greatest miss chain length
                if (methodMissChain > maxMissChain)
                    maxMissChain = methodMissChain;
            }

            // Factor this cache into statistics about caches of the same
            // type and size (all caches are a power of two in size)
            log2Size = log2u (mask + 1);
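            // (mask+1 is the slot count, always a power of two, so log2u
            // recovers the size class directly: mask == 7 -> log2Size == 3.)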
            cacheCountBySize[isMeta][log2Size] += 1;
            totalEntriesBySize[isMeta][log2Size] += entryCount;
            if (entryCount > maxEntriesBySize[isMeta][log2Size])
                maxEntriesBySize[isMeta][log2Size] = entryCount;
            totalChainBySize[isMeta][log2Size] += totalChain;
            totalMissChainBySize[isMeta][log2Size] += totalMissChain;
            totalMaxChainBySize[isMeta][log2Size] += maxChain;
            totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
            if (maxChain > maxChainBySize[isMeta][log2Size])
                maxChainBySize[isMeta][log2Size] = maxChain;
            if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
                maxMissChainBySize[isMeta][log2Size] = maxMissChain;
#ifdef OBJC_INSTRUMENTED
            {
                CacheInstrumentation *cacheData;

                cacheData = CACHE_INSTRUMENTATION(cache);
                hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
                hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
                if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
                    maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
                missCountBySize[isMeta][log2Size] += cacheData->missCount;
                missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
                if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
                    maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
                flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
                flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
                if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
                    maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
            }
#endif

            // Caches start with a power of two number of entries, and grow
            // by doubling, so we can calculate the number of times this
            // cache has expanded
            cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;
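            // (For example, assuming INIT_CACHE_SIZE_LOG2 is 2, a 64-slot
            // cache (log2Size == 6) has been expanded 6 - 2 == 4 times.)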
        }
    }

    {
        unsigned int cacheCountByType[2] = {0};
        unsigned int totalCacheCount = 0;
        unsigned int totalEntries = 0;
        unsigned int maxEntries = 0;
        unsigned int totalSlots = 0;
#ifdef OBJC_INSTRUMENTED
        unsigned int totalHitCount = 0;
        unsigned int totalHitProbes = 0;
        unsigned int maxHitProbes = 0;
        unsigned int totalMissCount = 0;
        unsigned int totalMissProbes = 0;
        unsigned int maxMissProbes = 0;
        unsigned int totalFlushCount = 0;
        unsigned int totalFlushedEntries = 0;
        unsigned int maxFlushedEntries = 0;
#endif

        totalChain = 0;
        maxChain = 0;
        totalMissChain = 0;
        maxMissChain = 0;

        // Sum information over all caches
        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
                totalEntries += totalEntriesBySize[isMeta][index];
                totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
                totalChain += totalChainBySize[isMeta][index];
                if (maxEntriesBySize[isMeta][index] > maxEntries)
                    maxEntries = maxEntriesBySize[isMeta][index];
                if (maxChainBySize[isMeta][index] > maxChain)
                    maxChain = maxChainBySize[isMeta][index];
                totalMissChain += totalMissChainBySize[isMeta][index];
                if (maxMissChainBySize[isMeta][index] > maxMissChain)
                    maxMissChain = maxMissChainBySize[isMeta][index];
#ifdef OBJC_INSTRUMENTED
                totalHitCount += hitCountBySize[isMeta][index];
                totalHitProbes += hitProbesBySize[isMeta][index];
                if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
                    maxHitProbes = maxHitProbesBySize[isMeta][index];
                totalMissCount += missCountBySize[isMeta][index];
                totalMissProbes += missProbesBySize[isMeta][index];
                if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
                    maxMissProbes = maxMissProbesBySize[isMeta][index];
                totalFlushCount += flushCountBySize[isMeta][index];
                totalFlushedEntries += flushedEntriesBySize[isMeta][index];
                if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
                    maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
#endif
            }

            totalCacheCount += cacheCountByType[isMeta];
        }

        // Log our findings
        printf ("There are %u classes\n", classCount);

        for (isMeta = 0; isMeta <= 1; isMeta += 1)
        {
            // Number of this type of class
            printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
                    cacheCountByType[isMeta],
                    isMeta ? "class" : "instance");

            // Print header
            PrintCacheHeader ();

            // Keep format consistent even if there are no caches of this kind
            if (cacheCountByType[isMeta] == 0)
            {
                printf ("(none)\n");
                continue;
            }

            // Usage information by cache size
            for (index = 0; index < MAX_LOG2_SIZE; index += 1)
            {
                unsigned int cacheCount;
                unsigned int cacheSlotCount;
                unsigned int cacheEntryCount;

                // Get number of caches of this type and size
                cacheCount = cacheCountBySize[isMeta][index];
                if (cacheCount == 0)
                    continue;

                // Get the cache slot count and the total number of valid entries
                cacheSlotCount = (1 << index);
                cacheEntryCount = totalEntriesBySize[isMeta][index];

                // Give the analysis
                PrintCacheInfo (cacheSlotCount,
                                cacheCount,
                                cacheEntryCount,
                                (float) cacheEntryCount / (float) cacheCount,
                                maxEntriesBySize[isMeta][index],
                                (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
                                maxChainBySize[isMeta][index],
                                (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
                                maxMissChainBySize[isMeta][index]
#ifdef OBJC_INSTRUMENTED
                                , hitCountBySize[isMeta][index],
                                hitCountBySize[isMeta][index] ?
                                    (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
                                maxHitProbesBySize[isMeta][index],
                                missCountBySize[isMeta][index],
                                missCountBySize[isMeta][index] ?
                                    (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
                                maxMissProbesBySize[isMeta][index],
                                flushCountBySize[isMeta][index],
                                flushCountBySize[isMeta][index] ?
                                    (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
                                maxFlushedEntriesBySize[isMeta][index]
#endif
                                );
            }
        }

        // Give overall numbers
        printf ("\nCumulative:\n");
        PrintCacheHeader ();
        PrintCacheInfo (totalSlots,
                        totalCacheCount,
                        totalEntries,
                        (float) totalEntries / (float) totalCacheCount,
                        maxEntries,
                        (float) totalChain / (float) totalEntries,
                        maxChain,
                        (float) totalMissChain / (float) totalSlots,
                        maxMissChain
#ifdef OBJC_INSTRUMENTED
                        , totalHitCount,
                        totalHitCount ?
                            (float) totalHitProbes / (float) totalHitCount : 0.0,
                        maxHitProbes,
                        totalMissCount,
                        totalMissCount ?
                            (float) totalMissProbes / (float) totalMissCount : 0.0,
                        maxMissProbes,
                        totalFlushCount,
                        totalFlushCount ?
                            (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
                        maxFlushedEntries
#endif
                        );

        printf ("\nNumber of \"forward::\" entries: %u\n", negativeEntryCount);
        printf ("Number of cache expansions: %u\n", cacheExpandCount);

#ifdef OBJC_INSTRUMENTED
        printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n");
        printf ("              ----------- ------------ -------------- ---------- ------------- ------------\n");
        printf ("   linear     %11u %12u %14.1f %10u %13u %12.2f\n",
                LinearFlushCachesCount,
                LinearFlushCachesVisitedCount,
                LinearFlushCachesCount ?
                    (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
                MaxLinearFlushCachesVisitedCount,
                LinearFlushCachesVisitedCount,
                1.0);
        printf (" nonlinear    %11u %12u %14.1f %10u %13u %12.2f\n",
                NonlinearFlushCachesCount,
                NonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesCount ?
                    (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
                MaxNonlinearFlushCachesVisitedCount,
                NonlinearFlushCachesClassCount,
                NonlinearFlushCachesClassCount ?
                    (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
        printf ("   ideal      %11u %12u %14.1f %10u %13u %12.2f\n",
                LinearFlushCachesCount + NonlinearFlushCachesCount,
                IdealFlushCachesCount,
                LinearFlushCachesCount + NonlinearFlushCachesCount ?
                    (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
                MaxIdealFlushCachesCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
                LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
                    (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);

        PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
        PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
#endif

#if 0
        printf ("\nLookup chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (chainCount[index] != 0)
                printf (" %u:%u", index, chainCount[index]);
        }

        printf ("\nMiss chains:");
        for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
        {
            if (missChainCount[index] != 0)
                printf (" %u:%u", index, missChainCount[index]);
        }
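
        // (Estimate: per-cache header bytes (the struct minus its built-in
        // first bucket pointer), plus the bucket pointer array for every
        // slot, plus one cache_entry allocated per "forward::" placeholder.
        // Ordinary entries point to method structs owned elsewhere and are
        // not counted here.)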
        printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
                totalCacheCount * (sizeof(struct objc_cache) - sizeof(cache_entry *)) +
                totalSlots * sizeof(cache_entry *) +
                negativeEntryCount * sizeof(cache_entry));
#endif
    }
}

#endif


// !__OBJC2__
#endif