objc-cache.mm

/*
 * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/***********************************************************************
* objc-cache.m
* Method cache management
* Cache flushing
* Cache garbage collection
* Cache instrumentation
* Dedicated allocator for large caches
**********************************************************************/


/***********************************************************************
* Method cache locking (GrP 2001-1-14)
*
* For speed, objc_msgSend does not acquire any locks when it reads
* method caches. Instead, all cache changes are performed so that any
* objc_msgSend running concurrently with the cache mutator will not
* crash or hang or get an incorrect result from the cache.
*
* When cache memory becomes unused (e.g. the old cache after cache
* expansion), it is not immediately freed, because a concurrent
* objc_msgSend could still be using it. Instead, the memory is
* disconnected from the data structures and placed on a garbage list.
* The memory is now only accessible to instances of objc_msgSend that
* were running when the memory was disconnected; any further calls to
* objc_msgSend will not see the garbage memory because the other data
* structures don't point to it anymore. The collecting_in_critical
* function checks the PC of all threads and returns FALSE when all threads
* are found to be outside objc_msgSend. This means any call to objc_msgSend
* that could have had access to the garbage has finished or moved past the
* cache lookup stage, so it is safe to free the memory.
*
* All functions that modify cache data or structures must acquire the
* cacheUpdateLock to prevent interference from concurrent modifications.
* The function that frees cache garbage must acquire the cacheUpdateLock
* and use collecting_in_critical() to flush out cache readers.
* The cacheUpdateLock is also used to protect the custom allocator used
* for large method cache blocks.
*
* Cache readers (PC-checked by collecting_in_critical())
* objc_msgSend*
* cache_getImp
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
* cache_fill (acquires lock)
* cache_expand (only called from cache_fill)
* cache_create (only called from cache_expand)
* bcopy (only called from instrumented cache_expand)
* flush_caches (acquires lock)
* cache_flush (only called from cache_fill and flush_caches)
* cache_collect_free (only called from cache_expand and cache_flush)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* cache_print
* _class_printMethodCaches
* _class_printDuplicateCacheEntries
* _class_printMethodCacheStatistics
*
***********************************************************************/
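// Sketch of the resulting lifecycle of a bucket array (summarizing the
// comment above):
//   writer (holding the lock):   allocate new buckets -> publish the new
//     buckets/mask -> hand the old array to cache_collect_free()
//   collector (holding the lock): once enough garbage has accumulated,
//     call _collecting_in_critical() and free the garbage only when no
//     thread's PC is inside a cache-reading routine
//   readers (objc_msgSend, cache_getImp): never lock; at worst they see a
//     stale or partially updated cache and take a harmless cache miss.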
#if __OBJC2__

#include "objc-private.h"
#include "objc-cache.h"


/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
    INIT_CACHE_SIZE_LOG2 = 2,
    INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
    MAX_CACHE_SIZE_LOG2 = 16,
    MAX_CACHE_SIZE = (1 << MAX_CACHE_SIZE_LOG2),
};

static void cache_collect_free(struct bucket_t *data, mask_t capacity);
static int _collecting_in_critical(void);
static void _garbage_make_room(void);


/***********************************************************************
* Cache statistics for OBJC_PRINT_CACHE_SETUP
**********************************************************************/
static unsigned int cache_counts[16];
static size_t cache_allocations;
static size_t cache_collections;

static void recordNewCache(mask_t capacity)
{
    size_t bucket = log2u(capacity);
    if (bucket < countof(cache_counts)) {
        cache_counts[bucket]++;
    }
    cache_allocations++;
}

static void recordDeadCache(mask_t capacity)
{
    size_t bucket = log2u(capacity);
    if (bucket < countof(cache_counts)) {
        cache_counts[bucket]--;
    }
}


/***********************************************************************
* Pointers used by compiled class objects
* These use asm to avoid conflicts with the compiler's internal declarations
**********************************************************************/

// EMPTY_BYTES includes space for a cache end marker bucket.
// This end marker doesn't actually have the wrap-around pointer
// because cache scans always find an empty bucket before they might wrap.
// 1024 buckets is fairly common.
#if DEBUG
    // Use a smaller size to exercise heap-allocated empty caches.
#   define EMPTY_BYTES ((8+1)*16)
#else
#   define EMPTY_BYTES ((1024+1)*16)
#endif
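// Example of the arithmetic above, assuming 16 bytes per bucket slot
// (two pointer-sized fields on 64-bit): (1024+1)*16 = 16400 bytes, i.e.
// room for 1024 empty buckets plus the one extra end-marker slot.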
#define stringize(x) #x
#define stringize2(x) stringize(x)

// "cache" is cache->buckets; "vtable" is cache->mask/occupied
// hack to avoid conflicts with compiler's internal declaration
asm("\n .section __TEXT,__const"
    "\n .globl __objc_empty_vtable"
    "\n .set __objc_empty_vtable, 0"
    "\n .globl __objc_empty_cache"
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    "\n .align 4"
    "\n L__objc_empty_cache: .space " stringize2(EMPTY_BYTES)
    "\n .set __objc_empty_cache, L__objc_empty_cache + 0xf"
#else
    "\n .align 3"
    "\n __objc_empty_cache: .space " stringize2(EMPTY_BYTES)
#endif
    );


#if __arm__ || __x86_64__ || __i386__
// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return (i+1) & mask;
}

#elif __arm64__
// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return i ? i-1 : mask;
}

#else
#error unknown architecture
#endif
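// For illustration: with mask == 3 (capacity 4) and a starting index of 2,
// the incrementing scan visits buckets 2, 3, 0, 1 while the decrementing
// scan visits 2, 1, 0, 3. Both visit every slot exactly once before
// returning to the start, which is what cache_t::insert() below relies on.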
// mega_barrier doesn't really work, but it works enough on ARM that
// we leave well enough alone and keep using it there.
#if __arm__
#define mega_barrier() \
    __asm__ __volatile__( \
        "dsb ish" \
        : : : "memory")
#endif

#if __arm64__

// Pointer-size register prefix for inline asm
# if __LP64__
#   define p "x"  // true arm64
# else
#   define p "w"  // arm64_32
# endif

// Use atomic double-word instructions to update cache entries.
// This requires cache buckets not cross cache line boundaries.
static ALWAYS_INLINE void
stp(uintptr_t onep, uintptr_t twop, void *destp)
{
    __asm__ ("stp %" p "[one], %" p "[two], [%x[dest]]"
             : "=m" (((uintptr_t *)(destp))[0]),
               "=m" (((uintptr_t *)(destp))[1])
             : [one] "r" (onep),
               [two] "r" (twop),
               [dest] "r" (destp)
             : /* no clobbers */
             );
}

static ALWAYS_INLINE void __unused
ldp(uintptr_t& onep, uintptr_t& twop, const void *srcp)
{
    __asm__ ("ldp %" p "[one], %" p "[two], [%x[src]]"
             : [one] "=r" (onep),
               [two] "=r" (twop)
             : "m" (((const uintptr_t *)(srcp))[0]),
               "m" (((const uintptr_t *)(srcp))[1]),
               [src] "r" (srcp)
             : /* no clobbers */
             );
}

#undef p
#endif


// Class points to cache. SEL is key. Cache buckets store SEL+IMP.
// Caches are never built in the dyld shared cache.

static inline mask_t cache_hash(SEL sel, mask_t mask)
{
    return (mask_t)(uintptr_t)sel & mask;
}
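// For illustration: cache_hash() simply takes the selector pointer's low
// bits. With mask == 7 (capacity 8), a SEL whose address ends in 0xd3 maps
// to bucket 0xd3 & 7 == 3; on a collision the scan probes neighboring
// buckets from there using cache_next().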
cache_t *getCache(Class cls)
{
    ASSERT(cls);
    return &cls->cache;
}

#if __arm64__

template<Atomicity atomicity, IMPEncoding impEncoding>
void bucket_t::set(SEL newSel, IMP newImp, Class cls)
{
    ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 ||
           _sel.load(memory_order::memory_order_relaxed) == newSel);

    static_assert(offsetof(bucket_t,_imp) == 0 &&
                  offsetof(bucket_t,_sel) == sizeof(void *),
                  "bucket_t layout doesn't match arm64 bucket_t::set()");

    uintptr_t encodedImp = (impEncoding == Encoded
                            ? encodeImp(newImp, newSel, cls)
                            : (uintptr_t)newImp);

    // LDP/STP guarantees that all observers get
    // either imp/sel or newImp/newSel
    stp(encodedImp, (uintptr_t)newSel, this);
}

#else

template<Atomicity atomicity, IMPEncoding impEncoding>
void bucket_t::set(SEL newSel, IMP newImp, Class cls)
{
    ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 ||
           _sel.load(memory_order::memory_order_relaxed) == newSel);

    // objc_msgSend uses sel and imp with no locks.
    // It is safe for objc_msgSend to see new imp but NULL sel
    // (It will get a cache miss but not dispatch to the wrong place.)
    // It is unsafe for objc_msgSend to see old imp and new sel.
    // Therefore we write new imp, wait a lot, then write new sel.

    uintptr_t newIMP = (impEncoding == Encoded
                        ? encodeImp(newImp, newSel, cls)
                        : (uintptr_t)newImp);

    if (atomicity == Atomic) {
        _imp.store(newIMP, memory_order::memory_order_relaxed);

        if (_sel.load(memory_order::memory_order_relaxed) != newSel) {
#ifdef __arm__
            mega_barrier();
            _sel.store(newSel, memory_order::memory_order_relaxed);
#elif __x86_64__ || __i386__
            _sel.store(newSel, memory_order::memory_order_release);
#else
#error Don't know how to do bucket_t::set on this architecture.
#endif
        }
    } else {
        _imp.store(newIMP, memory_order::memory_order_relaxed);
        _sel.store(newSel, memory_order::memory_order_relaxed);
    }
}

#endif

#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED

void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
    // objc_msgSend uses mask and buckets with no locks.
    // It is safe for objc_msgSend to see new buckets but old mask.
    // (It will get a cache miss but not overrun the buckets' bounds).
    // It is unsafe for objc_msgSend to see old buckets and new mask.
    // Therefore we write new buckets, wait a lot, then write new mask.
    // objc_msgSend reads mask first, then buckets.

#ifdef __arm__
    // ensure other threads see buckets contents before buckets pointer
    mega_barrier();

    _buckets.store(newBuckets, memory_order::memory_order_relaxed);

    // ensure other threads see new buckets before new mask
    mega_barrier();

    _mask.store(newMask, memory_order::memory_order_relaxed);
    _occupied = 0;
#elif __x86_64__ || __i386__
    // ensure other threads see buckets contents before buckets pointer
    _buckets.store(newBuckets, memory_order::memory_order_release);

    // ensure other threads see new buckets before new mask
    _mask.store(newMask, memory_order::memory_order_release);
    _occupied = 0;
#else
#error Don't know how to do setBucketsAndMask on this architecture.
#endif
}

struct bucket_t *cache_t::emptyBuckets()
{
    return (bucket_t *)&_objc_empty_cache;
}

struct bucket_t *cache_t::buckets()
{
    return _buckets.load(memory_order::memory_order_relaxed);
}

mask_t cache_t::mask()
{
    return _mask.load(memory_order::memory_order_relaxed);
}

void cache_t::initializeToEmpty()
{
    bzero(this, sizeof(*this));
    _buckets.store((bucket_t *)&_objc_empty_cache, memory_order::memory_order_relaxed);
}

#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16

void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
    uintptr_t buckets = (uintptr_t)newBuckets;
    uintptr_t mask = (uintptr_t)newMask;

    ASSERT(buckets <= bucketsMask);
    ASSERT(mask <= maxMask);

    _maskAndBuckets.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, std::memory_order_relaxed);
    _occupied = 0;
}

struct bucket_t *cache_t::emptyBuckets()
{
    return (bucket_t *)&_objc_empty_cache;
}

struct bucket_t *cache_t::buckets()
{
    uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
    return (bucket_t *)(maskAndBuckets & bucketsMask);
}

mask_t cache_t::mask()
{
    uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
    return maskAndBuckets >> maskShift;
}

void cache_t::initializeToEmpty()
{
    bzero(this, sizeof(*this));
    _maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
}

#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4

void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
    uintptr_t buckets = (uintptr_t)newBuckets;
    unsigned mask = (unsigned)newMask;

    ASSERT(buckets == (buckets & bucketsMask));
    ASSERT(mask <= 0xffff);

    // The shift amount is equal to the number of leading zeroes in
    // the last 16 bits of mask. Count all the leading zeroes, then
    // subtract to ignore the top half.
    uintptr_t maskShift = __builtin_clz(mask) - (sizeof(mask) * CHAR_BIT - 16);
    ASSERT(mask == (0xffff >> maskShift));

    _maskAndBuckets.store(buckets | maskShift, memory_order::memory_order_relaxed);
    _occupied = 0;

    ASSERT(this->buckets() == newBuckets);
    ASSERT(this->mask() == newMask);
}
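// Worked example of the encoding above: with newMask == 7 (capacity 8,
// so mask == 0x0007), __builtin_clz(7) on a 32-bit unsigned is 29, and
// 29 - (32 - 16) == 13; indeed 0xffff >> 13 == 7. Only that small shift
// value is stored in the low bits of _maskAndBuckets, and mask() below
// recomputes 0xffff >> 13 to recover 7.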
struct bucket_t *cache_t::emptyBuckets()
{
    return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask);
}

struct bucket_t *cache_t::buckets()
{
    uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
    return (bucket_t *)(maskAndBuckets & bucketsMask);
}

mask_t cache_t::mask()
{
    uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
    uintptr_t maskShift = (maskAndBuckets & maskMask);
    return 0xffff >> maskShift;
}

void cache_t::initializeToEmpty()
{
    bzero(this, sizeof(*this));
    _maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
}

#else
#error Unknown cache mask storage type.
#endif

mask_t cache_t::occupied()
{
    return _occupied;
}

void cache_t::incrementOccupied()
{
    _occupied++;
}

unsigned cache_t::capacity()
{
    return mask() ? mask()+1 : 0;
}


#if CACHE_END_MARKER

size_t cache_t::bytesForCapacity(uint32_t cap)
{
    // fixme put end marker inline when capacity+1 malloc is inefficient
    return sizeof(bucket_t) * (cap + 1);
}

bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
{
    // bytesForCapacity() chooses whether the end marker is inline or not
    return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;
}

bucket_t *allocateBuckets(mask_t newCapacity)
{
    // Allocate one extra bucket to mark the end of the list.
    // This can't overflow mask_t because newCapacity is a power of 2.
    // fixme instead put the end mark inline when +1 is malloc-inefficient
    bucket_t *newBuckets = (bucket_t *)
        calloc(cache_t::bytesForCapacity(newCapacity), 1);

    bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);

#if __arm__
    // End marker's sel is 1 and imp points BEFORE the first bucket.
    // This saves an instruction in objc_msgSend.
    end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil);
#else
    // End marker's sel is 1 and imp points to the first bucket.
    end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)newBuckets, nil);
#endif

    if (PrintCaches) recordNewCache(newCapacity);

    return newBuckets;
}
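// Layout example for the end-marker scheme, assuming 16-byte buckets:
// for newCapacity == 8, bytesForCapacity() returns (8+1)*16 == 144 bytes.
// Buckets 0..7 hold real entries; the ninth slot is the end marker whose
// sel is the non-selector value 1, letting the assembly scan detect the
// end of the array and wrap around without a separate bounds check.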
#else

size_t cache_t::bytesForCapacity(uint32_t cap)
{
    return sizeof(bucket_t) * cap;
}

bucket_t *allocateBuckets(mask_t newCapacity)
{
    if (PrintCaches) recordNewCache(newCapacity);

    return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1);
}

#endif


bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    size_t bytes = cache_t::bytesForCapacity(capacity);

    // Use _objc_empty_cache if the bucket array is small enough.
    if (bytes <= EMPTY_BYTES) {
        return cache_t::emptyBuckets();
    }

    // Use shared empty buckets allocated on the heap.
    static bucket_t **emptyBucketsList = nil;
    static mask_t emptyBucketsListCount = 0;

    mask_t index = log2u(capacity);

    if (index >= emptyBucketsListCount) {
        if (!allocate) return nil;

        mask_t newListCount = index + 1;
        bucket_t *newBuckets = (bucket_t *)calloc(bytes, 1);
        emptyBucketsList = (bucket_t**)
            realloc(emptyBucketsList, newListCount * sizeof(bucket_t *));
        // Share newBuckets for every un-allocated size smaller than index.
        // The array is therefore always fully populated.
        for (mask_t i = emptyBucketsListCount; i < newListCount; i++) {
            emptyBucketsList[i] = newBuckets;
        }
        emptyBucketsListCount = newListCount;

        if (PrintCaches) {
            _objc_inform("CACHES: new empty buckets at %p (capacity %zu)",
                         newBuckets, (size_t)capacity);
        }
    }

    return emptyBucketsList[index];
}
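// For illustration: heap-allocated empty buckets are shared by capacity
// class. Requesting an empty cache for capacity 4096 (too big for the
// static _objc_empty_cache) gives index log2u(4096) == 12; if only
// indexes 0..10 existed so far, one new zeroed allocation is created and
// entries 11 and 12 both point at it, so a later request for capacity
// 2048 reuses the same block.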
bool cache_t::isConstantEmptyCache()
{
    return
        occupied() == 0 &&
        buckets() == emptyBucketsForCapacity(capacity(), false);
}

bool cache_t::canBeFreed()
{
    return !isConstantEmptyCache();
}

ALWAYS_INLINE
void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld)
{
    bucket_t *oldBuckets = buckets();
    bucket_t *newBuckets = allocateBuckets(newCapacity);

    // Cache's old contents are not propagated.
    // This is thought to save cache memory at the cost of extra cache fills.
    // fixme re-measure this

    ASSERT(newCapacity > 0);
    ASSERT((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);

    setBucketsAndMask(newBuckets, newCapacity - 1);

    if (freeOld) {
        cache_collect_free(oldBuckets, oldCapacity);
    }
}

void cache_t::bad_cache(id receiver, SEL sel, Class isa)
{
    // Log in separate steps in case the logging itself causes a crash.
    _objc_inform_now_and_on_crash
        ("Method cache corrupted. This may be a message to an "
         "invalid object, or a memory error somewhere else.");
    cache_t *cache = &isa->cache;
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    bucket_t *buckets = cache->_buckets.load(memory_order::memory_order_relaxed);
    _objc_inform_now_and_on_crash
        ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
         "mask 0x%x, occupied 0x%x",
         receiver ? "receiver" : "unused", receiver,
         sel, isa, cache, buckets,
         cache->_mask.load(memory_order::memory_order_relaxed),
         cache->_occupied);
    _objc_inform_now_and_on_crash
        ("%s %zu bytes, buckets %zu bytes",
         receiver ? "receiver" : "unused", malloc_size(receiver),
         malloc_size(buckets));
#elif (CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || \
       CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4)
    uintptr_t maskAndBuckets = cache->_maskAndBuckets.load(memory_order::memory_order_relaxed);
    _objc_inform_now_and_on_crash
        ("%s %p, SEL %p, isa %p, cache %p, buckets and mask 0x%lx, "
         "occupied 0x%x",
         receiver ? "receiver" : "unused", receiver,
         sel, isa, cache, maskAndBuckets,
         cache->_occupied);
    _objc_inform_now_and_on_crash
        ("%s %zu bytes, buckets %zu bytes",
         receiver ? "receiver" : "unused", malloc_size(receiver),
         malloc_size(cache->buckets()));
#else
#error Unknown cache mask storage type.
#endif
    _objc_inform_now_and_on_crash
        ("selector '%s'", sel_getName(sel));
    _objc_inform_now_and_on_crash
        ("isa '%s'", isa->nameForLogging());
    _objc_fatal
        ("Method cache corrupted. This may be a message to an "
         "invalid object, or a memory error somewhere else.");
}
ALWAYS_INLINE
void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    ASSERT(sel != 0 && cls->isInitialized());

    // Use the cache as-is if it is less than 3/4 full
    mask_t newOccupied = occupied() + 1;
    unsigned oldCapacity = capacity(), capacity = oldCapacity;
    if (slowpath(isConstantEmptyCache())) {
        // Cache is read-only. Replace it.
        if (!capacity) capacity = INIT_CACHE_SIZE;
        reallocate(oldCapacity, capacity, /* freeOld */false);
    }
    else if (fastpath(newOccupied <= capacity / 4 * 3)) {
        // Cache is less than 3/4 full. Use it as-is.
    }
    else {
        capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
        if (capacity > MAX_CACHE_SIZE) {
            capacity = MAX_CACHE_SIZE;
        }
        reallocate(oldCapacity, capacity, true);
    }

    bucket_t *b = buckets();
    mask_t m = capacity - 1;
    mask_t begin = cache_hash(sel, m);
    mask_t i = begin;

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    do {
        if (fastpath(b[i].sel() == 0)) {
            incrementOccupied();
            b[i].set<Atomic, Encoded>(sel, imp, cls);
            return;
        }
        if (b[i].sel() == sel) {
            // The entry was added to the cache by some other thread
            // before we grabbed the cacheUpdateLock.
            return;
        }
    } while (fastpath((i = cache_next(i, m)) != begin));

    cache_t::bad_cache(receiver, (SEL)sel, cls);
}
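// Growth example for insert(): a fresh class starts on the shared empty
// cache (capacity 0), so the first fill allocates INIT_CACHE_SIZE == 4
// buckets. With capacity 4 the 3/4 threshold is 3, so the insert that
// would make newOccupied == 4 instead doubles the cache to 8 empty buckets
// (old entries are dropped, not rehashed) and then inserts. Doubling
// continues up to MAX_CACHE_SIZE == 65536 buckets.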
void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
{
    runtimeLock.assertLocked();

#if !DEBUG_TASK_THREADS
    // Never cache before +initialize is done
    if (cls->isInitialized()) {
        cache_t *cache = getCache(cls);
#if CONFIG_USE_CACHE_LOCK
        mutex_locker_t lock(cacheUpdateLock);
#endif
        cache->insert(cls, sel, imp, receiver);
    }
#else
    _collecting_in_critical();
#endif
}


// Reset this entire cache to the uncached lookup by reallocating it.
// This must not shrink the cache - that breaks the lock-free scheme.
void cache_erase_nolock(Class cls)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    cache_t *cache = getCache(cls);

    mask_t capacity = cache->capacity();
    if (capacity > 0 && cache->occupied() > 0) {
        auto oldBuckets = cache->buckets();
        auto buckets = emptyBucketsForCapacity(capacity);
        cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied

        cache_collect_free(oldBuckets, capacity);
    }
}


void cache_delete(Class cls)
{
#if CONFIG_USE_CACHE_LOCK
    mutex_locker_t lock(cacheUpdateLock);
#else
    runtimeLock.assertLocked();
#endif
    if (cls->cache.canBeFreed()) {
        if (PrintCaches) recordDeadCache(cls->cache.capacity());
        free(cls->cache.buckets());
    }
}


/***********************************************************************
* cache collection.
**********************************************************************/

#if !TARGET_OS_WIN32

// A sentinel (magic value) to report bad thread_get_state status.
// Must not be a valid PC.
// Must not be zero - thread_get_state() on a new thread returns PC == 0.
#define PC_SENTINEL 1

static uintptr_t _get_pc_for_thread(thread_t thread)
#if defined(__i386__)
{
    i386_thread_state_t state;
    unsigned int count = i386_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
}
#elif defined(__x86_64__)
{
    x86_thread_state64_t state;
    unsigned int count = x86_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
}
#elif defined(__arm__)
{
    arm_thread_state_t state;
    unsigned int count = ARM_THREAD_STATE_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
}
#elif defined(__arm64__)
{
    arm_thread_state64_t state;
    unsigned int count = ARM_THREAD_STATE64_COUNT;
    kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
    return (okay == KERN_SUCCESS) ? (uintptr_t)arm_thread_state64_get_pc(state) : PC_SENTINEL;
}
#else
{
#error _get_pc_for_thread () not implemented for this architecture
}
#endif

#endif
/***********************************************************************
* _collecting_in_critical.
* Returns TRUE if some thread is currently executing a cache-reading
* function. Collection of cache garbage is not allowed when a cache-
* reading function is in progress because it might still be using
* the garbage memory.
**********************************************************************/
#if HAVE_TASK_RESTARTABLE_RANGES
#include <kern/restartable.h>
#else
typedef struct {
    uint64_t location;
    unsigned short length;
    unsigned short recovery_offs;
    unsigned int flags;
} task_restartable_range_t;
#endif

extern "C" task_restartable_range_t objc_restartableRanges[];

#if HAVE_TASK_RESTARTABLE_RANGES
static bool shouldUseRestartableRanges = true;
#endif

void cache_init()
{
#if HAVE_TASK_RESTARTABLE_RANGES
    mach_msg_type_number_t count = 0;
    kern_return_t kr;

    while (objc_restartableRanges[count].location) {
        count++;
    }

    kr = task_restartable_ranges_register(mach_task_self(),
                                          objc_restartableRanges, count);
    if (kr == KERN_SUCCESS) return;
    _objc_fatal("task_restartable_ranges_register failed (result 0x%x: %s)",
                kr, mach_error_string(kr));
#endif // HAVE_TASK_RESTARTABLE_RANGES
}

static int _collecting_in_critical(void)
{
#if TARGET_OS_WIN32
    return TRUE;
#elif HAVE_TASK_RESTARTABLE_RANGES
    // Only use restartable ranges if we registered them earlier.
    if (shouldUseRestartableRanges) {
        kern_return_t kr = task_restartable_ranges_synchronize(mach_task_self());
        if (kr == KERN_SUCCESS) return FALSE;
        _objc_fatal("task_restartable_ranges_synchronize failed (result 0x%x: %s)",
                    kr, mach_error_string(kr));
    }
#endif // HAVE_TASK_RESTARTABLE_RANGES

    // Fallthrough if we didn't use restartable ranges.

    thread_act_port_array_t threads;
    unsigned number;
    unsigned count;
    kern_return_t ret;
    int result;

    mach_port_t mythread = pthread_mach_thread_np(objc_thread_self());

    // Get a list of all the threads in the current task
#if !DEBUG_TASK_THREADS
    ret = task_threads(mach_task_self(), &threads, &number);
#else
    ret = objc_task_threads(mach_task_self(), &threads, &number);
#endif

    if (ret != KERN_SUCCESS) {
        // See DEBUG_TASK_THREADS below to help debug this.
        _objc_fatal("task_threads failed (result 0x%x)\n", ret);
    }

    // Check whether any thread is in the cache lookup code
    result = FALSE;
    for (count = 0; count < number; count++)
    {
        int region;
        uintptr_t pc;

        // Don't bother checking ourselves
        if (threads[count] == mythread)
            continue;

        // Find out where thread is executing
        pc = _get_pc_for_thread (threads[count]);
        // Check for bad status, and if so, assume the worst (can't collect)
        if (pc == PC_SENTINEL)
        {
            result = TRUE;
            goto done;
        }

        // Check whether it is in the cache lookup code
        for (region = 0; objc_restartableRanges[region].location != 0; region++)
        {
            uint64_t loc = objc_restartableRanges[region].location;
            if ((pc > loc) &&
                (pc - loc < (uint64_t)objc_restartableRanges[region].length))
            {
                result = TRUE;
                goto done;
            }
        }
    }

 done:
    // Deallocate the port rights for the threads
    for (count = 0; count < number; count++) {
        mach_port_deallocate(mach_task_self (), threads[count]);
    }

    // Deallocate the thread list
    vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);

    // Return our finding
    return result;
}
/***********************************************************************
* _garbage_make_room. Ensure that there is enough room for at least
* one more ref in the garbage.
**********************************************************************/

// amount of memory represented by all refs in the garbage
static size_t garbage_byte_size = 0;

// do not empty the garbage until garbage_byte_size gets at least this big
static size_t garbage_threshold = 32*1024;

// table of refs to free
static bucket_t **garbage_refs = 0;

// current number of refs in garbage_refs
static size_t garbage_count = 0;

// capacity of current garbage_refs
static size_t garbage_max = 0;

// capacity of initial garbage_refs
enum {
    INIT_GARBAGE_COUNT = 128
};
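// In other words (illustrative numbers): the garbage table starts with room
// for 128 retired bucket arrays and doubles whenever it fills, while the
// retired arrays themselves are only freed once they add up to at least
// garbage_threshold (32 KB) and no thread is inside a cache reader.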
static void _garbage_make_room(void)
{
    static int first = 1;

    // Create the collection table the first time it is needed
    if (first)
    {
        first = 0;
        garbage_refs = (bucket_t**)
            malloc(INIT_GARBAGE_COUNT * sizeof(void *));
        garbage_max = INIT_GARBAGE_COUNT;
    }

    // Double the table if it is full
    else if (garbage_count == garbage_max)
    {
        garbage_refs = (bucket_t**)
            realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
        garbage_max *= 2;
    }
}


/***********************************************************************
* cache_collect_free. Add the specified malloc'd memory to the list
* of them to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void cache_collect_free(bucket_t *data, mask_t capacity)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    if (PrintCaches) recordDeadCache(capacity);

    _garbage_make_room ();
    garbage_byte_size += cache_t::bytesForCapacity(capacity);
    garbage_refs[garbage_count++] = data;
    cache_collect(false);
}


/***********************************************************************
* cache_collect. Try to free accumulated dead caches.
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void cache_collect(bool collectALot)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    // Done if the garbage is not full
    if (garbage_byte_size < garbage_threshold && !collectALot) {
        return;
    }

    // Synchronize collection with objc_msgSend and other cache readers
    if (!collectALot) {
        if (_collecting_in_critical ()) {
            // objc_msgSend (or other cache reader) is currently looking in
            // the cache and might still be using some garbage.
            if (PrintCaches) {
                _objc_inform ("CACHES: not collecting; "
                              "objc_msgSend in progress");
            }
            return;
        }
    }
    else {
        // No excuses.
        while (_collecting_in_critical())
            ;
    }

    // No cache readers in progress - garbage is now deletable

    // Log our progress
    if (PrintCaches) {
        cache_collections++;
        _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);
    }

    // Dispose all refs now in the garbage
    // Erase each entry so debugging tools don't see stale pointers.
    while (garbage_count--) {
        auto dead = garbage_refs[garbage_count];
        garbage_refs[garbage_count] = nil;
        free(dead);
    }

    // Clear the garbage count and total size indicator
    garbage_count = 0;
    garbage_byte_size = 0;

    if (PrintCaches) {
        size_t i;
        size_t total_count = 0;
        size_t total_size = 0;

        for (i = 0; i < countof(cache_counts); i++) {
            int count = cache_counts[i];
            int slots = 1 << i;
            size_t size = count * slots * sizeof(bucket_t);

            if (!count) continue;

            _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",
                         slots, count, size);

            total_count += count;
            total_size += size;
        }

        _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
                     total_count, total_size);
    }
}
/***********************************************************************
* objc_task_threads
* Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
* crashes when task_threads() is failing.
*
* A failure in task_threads() usually means somebody has botched their
* Mach or MIG traffic. For example, somebody's error handling was wrong
* and they left a message queued on the MIG reply port for task_threads()
* to trip over.
*
* The code below is a modified version of task_threads(). It logs
* the msgh_id of the reply message. The msgh_id can identify the sender
* of the message, which can help pinpoint the faulty code.
* DEBUG_TASK_THREADS also calls collecting_in_critical() during every
* message dispatch, which can increase reproducibility of bugs.
*
* This code can be regenerated by running
* `mig /usr/include/mach/task.defs`.
**********************************************************************/
#if DEBUG_TASK_THREADS

#include <mach/mach.h>
#include <mach/message.h>
#include <mach/mig.h>

#define __MIG_check__Reply__task_subsystem__ 1
#define mig_internal static inline
#define __DeclareSendRpc(a, b)
#define __BeforeSendRpc(a, b)
#define __AfterSendRpc(a, b)
#define msgh_request_port msgh_remote_port
#define msgh_reply_port msgh_local_port

#ifndef __MachMsgErrorWithTimeout
#define __MachMsgErrorWithTimeout(_R_) { \
        switch (_R_) { \
        case MACH_SEND_INVALID_DATA: \
        case MACH_SEND_INVALID_DEST: \
        case MACH_SEND_INVALID_HEADER: \
            mig_put_reply_port(InP->Head.msgh_reply_port); \
            break; \
        case MACH_SEND_TIMED_OUT: \
        case MACH_RCV_TIMED_OUT: \
        default: \
            mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
        } \
    }
#endif /* __MachMsgErrorWithTimeout */

#ifndef __MachMsgErrorWithoutTimeout
#define __MachMsgErrorWithoutTimeout(_R_) { \
        switch (_R_) { \
        case MACH_SEND_INVALID_DATA: \
        case MACH_SEND_INVALID_DEST: \
        case MACH_SEND_INVALID_HEADER: \
            mig_put_reply_port(InP->Head.msgh_reply_port); \
            break; \
        default: \
            mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
        } \
    }
#endif /* __MachMsgErrorWithoutTimeout */

#if ( __MigTypeCheck )
#if __MIG_check__Reply__task_subsystem__
#if !defined(__MIG_check__Reply__task_threads_t__defined)
#define __MIG_check__Reply__task_threads_t__defined

mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
{
    typedef __Reply__task_threads_t __Reply;
    boolean_t msgh_simple;
#if __MigTypeCheck
    unsigned int msgh_size;
#endif /* __MigTypeCheck */

    if (Out0P->Head.msgh_id != 3502) {
        if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
        { return MIG_SERVER_DIED; }
        else
        { return MIG_REPLY_MISMATCH; }
    }

    msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
#if __MigTypeCheck
    msgh_size = Out0P->Head.msgh_size;

    if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
         msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
        (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
         ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
    { return MIG_TYPE_ERROR ; }
#endif /* __MigTypeCheck */

    if (msgh_simple) {
        return ((mig_reply_error_t *)Out0P)->RetCode;
    }

#if __MigTypeCheck
    if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
        Out0P->act_list.disposition != 17) {
        return MIG_TYPE_ERROR;
    }
#endif /* __MigTypeCheck */

    return MACH_MSG_SUCCESS;
}
#endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
#endif /* __MIG_check__Reply__task_subsystem__ */
#endif /* ( __MigTypeCheck ) */
/* Routine task_threads */
static kern_return_t objc_task_threads
(
    task_t target_task,
    thread_act_array_t *act_list,
    mach_msg_type_number_t *act_listCnt
)
{
#ifdef __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
    } Request;
#ifdef __MigPackStructs
#pragma pack()
#endif

#ifdef __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
        /* start of the kernel processed data */
        mach_msg_body_t msgh_body;
        mach_msg_ool_ports_descriptor_t act_list;
        /* end of the kernel processed data */
        NDR_record_t NDR;
        mach_msg_type_number_t act_listCnt;
        mach_msg_trailer_t trailer;
    } Reply;
#ifdef __MigPackStructs
#pragma pack()
#endif

#ifdef __MigPackStructs
#pragma pack(4)
#endif
    typedef struct {
        mach_msg_header_t Head;
        /* start of the kernel processed data */
        mach_msg_body_t msgh_body;
        mach_msg_ool_ports_descriptor_t act_list;
        /* end of the kernel processed data */
        NDR_record_t NDR;
        mach_msg_type_number_t act_listCnt;
    } __Reply;
#ifdef __MigPackStructs
#pragma pack()
#endif

    /*
     * typedef struct {
     *     mach_msg_header_t Head;
     *     NDR_record_t NDR;
     *     kern_return_t RetCode;
     * } mig_reply_error_t;
     */

    union {
        Request In;
        Reply Out;
    } Mess;

    Request *InP = &Mess.In;
    Reply *Out0P = &Mess.Out;

    mach_msg_return_t msg_result;

#ifdef __MIG_check__Reply__task_threads_t__defined
    kern_return_t check_result;
#endif /* __MIG_check__Reply__task_threads_t__defined */

    __DeclareSendRpc(3402, "task_threads")

    InP->Head.msgh_bits =
        MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    /* msgh_size passed as argument */
    InP->Head.msgh_request_port = target_task;
    InP->Head.msgh_reply_port = mig_get_reply_port();
    InP->Head.msgh_id = 3402;

    __BeforeSendRpc(3402, "task_threads")
    msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    __AfterSendRpc(3402, "task_threads")

    if (msg_result != MACH_MSG_SUCCESS) {
        _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
                     (size_t)Out0P->Head.msgh_id);
        __MachMsgErrorWithoutTimeout(msg_result);
        { return msg_result; }
    }

#if defined(__MIG_check__Reply__task_threads_t__defined)
    check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
    if (check_result != MACH_MSG_SUCCESS)
    { return check_result; }
#endif /* defined(__MIG_check__Reply__task_threads_t__defined) */

    *act_list = (thread_act_array_t)(Out0P->act_list.address);
    *act_listCnt = Out0P->act_listCnt;

    return KERN_SUCCESS;
}

// DEBUG_TASK_THREADS
#endif

// __OBJC2__
#endif