objc-runtime-new.h
/*
 * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H

// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang

// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// class has ro field for Swift metadata initializer callback
#define RO_HAS_SWIFT_INITIALIZER (1<<6)
// class compiled with ARC
#define RO_IS_ARC             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
// class does not allow associated objects on instances
#define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)
// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.

// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24 (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1<<21)
// class does not allow associated objects on its instances
#define RW_FORBIDS_ASSOCIATED_OBJECTS (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

// NOTE: MORE RW_ FLAGS DEFINED BELOW
// Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
// or class_t->bits (FAST_*).
//
// FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.

#if __LP64__
// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY    (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE    (1UL<<1)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL

#if __arm64__
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
//   isa_t::has_cxx_dtor is a single bfi
#define FAST_CACHE_HAS_CXX_DTOR       (1<<0)
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<2)
#else
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<0)
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
#define FAST_CACHE_HAS_CXX_DTOR       (1<<2)
#endif

// Fast Alloc fields:
//   This stores the word-aligned size of instances + "ALLOC_DELTA16",
//   or 0 if the instance size doesn't fit.
//
//   These bits occupy the same positions as in the instance size, so
//   that the size can be extracted with a simple mask operation.
//
//   FAST_CACHE_ALLOC_MASK16 allows extracting the instance size rounded
//   up to the next 16-byte boundary, which is a fast path for
//   _objc_rootAllocWithZone()
#define FAST_CACHE_ALLOC_MASK         0x1ff8
#define FAST_CACHE_ALLOC_MASK16       0x1ff0
#define FAST_CACHE_ALLOC_DELTA16      0x0008
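// Worked example (illustrative): for a 24-byte instance on LP64,
// setFastInstanceSize stores word_align(24) + FAST_CACHE_ALLOC_DELTA16 = 32.
// Then:
//   32 & FAST_CACHE_ALLOC_MASK16 == 32 == align16(24)        // rounded-up size
//   align16((32 & FAST_CACHE_ALLOC_MASK) - 8 + extra)        // general path
// Adding DELTA16 up front is what makes the MASK16 read a single AND.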
// class's instances require raw isa
#define FAST_CACHE_REQUIRES_RAW_ISA   (1<<13)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define FAST_CACHE_HAS_DEFAULT_AWZ    (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define FAST_CACHE_HAS_DEFAULT_CORE   (1<<15)
#else

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances require raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define RW_HAS_DEFAULT_RR     (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define RW_HAS_DEFAULT_CORE   (1<<13)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#endif // __LP64__
// The Swift ABI requires that these bits be defined like this on all platforms.
static_assert(FAST_IS_SWIFT_LEGACY == 1, "resistance is futile");
static_assert(FAST_IS_SWIFT_STABLE == 2, "resistance is futile");

#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits
#else
typedef uint16_t mask_t;
#endif
typedef uintptr_t SEL;

struct swift_class_t;

enum Atomicity { Atomic = true, NotAtomic = false };
enum IMPEncoding { Encoded = true, Raw = false };
struct bucket_t {
private:
    // IMP-first is better for arm64e ptrauth and no worse for arm64.
    // SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
    explicit_atomic<uintptr_t> _imp;
    explicit_atomic<SEL> _sel;
#else
    explicit_atomic<SEL> _sel;
    explicit_atomic<uintptr_t> _imp;
#endif

    // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
    uintptr_t modifierForSEL(SEL newSel, Class cls) const {
        return (uintptr_t)&_imp ^ (uintptr_t)newSel ^ (uintptr_t)cls;
    }

    // Sign newImp, with &_imp, newSel, and cls as modifiers.
    uintptr_t encodeImp(IMP newImp, SEL newSel, Class cls) const {
        if (!newImp) return 0;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        return (uintptr_t)
            ptrauth_auth_and_resign(newImp,
                                    ptrauth_key_function_pointer, 0,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(newSel, cls));
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (uintptr_t)newImp ^ (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (uintptr_t)newImp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

public:
    inline SEL sel() const { return _sel.load(memory_order::memory_order_relaxed); }

    inline IMP imp(Class cls) const {
        uintptr_t imp = _imp.load(memory_order::memory_order_relaxed);
        if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        SEL sel = _sel.load(memory_order::memory_order_relaxed);
        return (IMP)
            ptrauth_auth_and_resign((const void *)imp,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(sel, cls),
                                    ptrauth_key_function_pointer, 0);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (IMP)(imp ^ (uintptr_t)cls);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (IMP)imp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

    template <Atomicity, IMPEncoding>
    void set(SEL newSel, IMP newImp, Class cls);
};
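// Encoding sketch (illustrative): under CACHE_IMP_ENCODING_ISA_XOR the
// round trip is a pair of cheap XORs that objc_msgSend can undo inline:
//   uintptr_t stored = (uintptr_t)newImp ^ (uintptr_t)cls;  // encodeImp
//   IMP imp = (IMP)(stored ^ (uintptr_t)cls);               // imp(cls)
// A bucket read against the wrong class yields a garbage pointer rather
// than a callable IMP, which cheaply frustrates forged cache entries.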
struct cache_t {
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    explicit_atomic<struct bucket_t *> _buckets;
    explicit_atomic<mask_t> _mask;
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    // How much the mask is shifted by.
    static constexpr uintptr_t maskShift = 48;

    // Additional bits after the mask which must be zero. msgSend
    // takes advantage of these additional bits to construct the value
    // `mask << 4` from `_maskAndBuckets` in a single instruction.
    static constexpr uintptr_t maskZeroBits = 4;

    // The largest mask value we can store.
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;

    // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;

    // Ensure we have enough bits for the buckets pointer.
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS,
                  "Bucket field doesn't have enough bits for arbitrary pointers.");
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    // _maskAndBuckets stores the mask shift in the low 4 bits, and
    // the buckets pointer in the remainder of the value. The mask
    // shift is the value where (0xffff >> shift) produces the correct
    // mask. This is equal to 16 - log2(cache_size).
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    static constexpr uintptr_t maskBits = 4;
    static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
    static constexpr uintptr_t bucketsMask = ~maskMask;
#else
#error Unknown cache mask storage type.
#endif

#if __LP64__
    uint16_t _flags;
#endif
    uint16_t _occupied;

public:
    static bucket_t *emptyBuckets();

    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    unsigned capacity();
    bool isConstantEmptyCache();
    bool canBeFreed();

#if __LP64__
    bool getBit(uint16_t flags) const {
        return _flags & flags;
    }
    void setBit(uint16_t set) {
        __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
    }
    void clearBit(uint16_t clear) {
        __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
    }
#endif

#if FAST_CACHE_ALLOC_MASK
    bool hasFastInstanceSize(size_t extra) const
    {
        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        }
        return _flags & FAST_CACHE_ALLOC_MASK;
    }

    size_t fastInstanceSize(size_t extra) const
    {
        ASSERT(hasFastInstanceSize(extra));

        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        } else {
            size_t size = _flags & FAST_CACHE_ALLOC_MASK;
            // remove the FAST_CACHE_ALLOC_DELTA16 that was added
            // by setFastInstanceSize
            return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
        }
    }

    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
        uint16_t sizeBits;

        // Adding FAST_CACHE_ALLOC_DELTA16 allows FAST_CACHE_ALLOC_MASK16
        // to yield the proper 16-byte-aligned allocation size with a single mask
        sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
        sizeBits &= FAST_CACHE_ALLOC_MASK;
        if (newSize <= sizeBits) {
            newBits |= sizeBits;
        }
        _flags = newBits;
    }
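    // The `newSize <= sizeBits` check is the overflow guard: e.g. for
    // newSize == 0x2000, word_align(0x2000) + DELTA16 == 0x2008, and
    // 0x2008 & FAST_CACHE_ALLOC_MASK == 0x0008 < newSize, so no size bits
    // are stored and hasFastInstanceSize() reports false; allocation then
    // falls back to the slow path reading class_ro_t->instanceSize.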
#else
    bool hasFastInstanceSize(size_t extra) const {
        return false;
    }
    size_t fastInstanceSize(size_t extra) const {
        abort();
    }
    void setFastInstanceSize(size_t extra) {
        // nothing
    }
#endif

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
    void insert(Class cls, SEL sel, IMP imp, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
};
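// Packing sketch for CACHE_MASK_STORAGE_HIGH_16 (illustrative; the real
// setBucketsAndMask is defined out of line):
//   uintptr_t v = (uintptr_t)newBuckets | ((uintptr_t)newMask << maskShift);
//   bucket_t *b = (bucket_t *)(v & bucketsMask);   // low 44 bits
//   mask_t    m = (mask_t)(v >> maskShift);        // high 16 bits
// The maskZeroBits gap between pointer and mask is what lets msgSend form
//   v >> (maskShift - maskZeroBits)                // == mask << 4, one LSR
// i.e. the byte offset of the cache end, since sizeof(bucket_t) is 16 on LP64.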
// classref_t is unremapped class_t*
typedef struct classref * classref_t;

#ifdef __PTRAUTH_INTRINSICS__
#   define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671)
#else
#   define StubClassInitializerPtrauth
#endif
struct stub_class_t {
    uintptr_t isa;
    _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer;
};
/***********************************************************************
* entsize_list_tt<Element, List, FlagMask>
* Generic implementation of an array of non-fragile structs.
*
* Element is the struct type (e.g. method_t)
* List is the specialization of entsize_list_tt (e.g. method_list_t)
* FlagMask is used to stash extra bits in the entsize field
* (e.g. method list fixup markers)
**********************************************************************/
template <typename Element, typename List, uint32_t FlagMask>
struct entsize_list_tt {
    uint32_t entsizeAndFlags;
    uint32_t count;
    Element first;

    uint32_t entsize() const {
        return entsizeAndFlags & ~FlagMask;
    }
    uint32_t flags() const {
        return entsizeAndFlags & FlagMask;
    }

    Element& getOrEnd(uint32_t i) const {
        ASSERT(i <= count);
        return *(Element *)((uint8_t *)&first + i*entsize());
    }
    Element& get(uint32_t i) const {
        ASSERT(i < count);
        return getOrEnd(i);
    }

    size_t byteSize() const {
        return byteSize(entsize(), count);
    }

    static size_t byteSize(uint32_t entsize, uint32_t count) {
        return sizeof(entsize_list_tt) + (count-1)*entsize;
    }

    List *duplicate() const {
        auto *dup = (List *)calloc(this->byteSize(), 1);
        dup->entsizeAndFlags = this->entsizeAndFlags;
        dup->count = this->count;
        std::copy(begin(), end(), dup->begin());
        return dup;
    }

    struct iterator;
    const iterator begin() const {
        return iterator(*static_cast<const List*>(this), 0);
    }
    iterator begin() {
        return iterator(*static_cast<const List*>(this), 0);
    }
    const iterator end() const {
        return iterator(*static_cast<const List*>(this), count);
    }
    iterator end() {
        return iterator(*static_cast<const List*>(this), count);
    }

    struct iterator {
        uint32_t entsize;
        uint32_t index;  // keeping track of this saves a divide in operator-
        Element* element;

        typedef std::random_access_iterator_tag iterator_category;
        typedef Element value_type;
        typedef ptrdiff_t difference_type;
        typedef Element* pointer;
        typedef Element& reference;

        iterator() { }

        iterator(const List& list, uint32_t start = 0)
            : entsize(list.entsize())
            , index(start)
            , element(&list.getOrEnd(start))
        { }

        const iterator& operator += (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element + delta*entsize);
            index += (int32_t)delta;
            return *this;
        }
        const iterator& operator -= (ptrdiff_t delta) {
            element = (Element*)((uint8_t *)element - delta*entsize);
            index -= (int32_t)delta;
            return *this;
        }
        const iterator operator + (ptrdiff_t delta) const {
            return iterator(*this) += delta;
        }
        const iterator operator - (ptrdiff_t delta) const {
            return iterator(*this) -= delta;
        }

        iterator& operator ++ () { *this += 1; return *this; }
        iterator& operator -- () { *this -= 1; return *this; }
        iterator operator ++ (int) {
            iterator result(*this); *this += 1; return result;
        }
        iterator operator -- (int) {
            iterator result(*this); *this -= 1; return result;
        }

        ptrdiff_t operator - (const iterator& rhs) const {
            return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
        }

        Element& operator * () const { return *element; }
        Element* operator -> () const { return element; }

        operator Element& () const { return *element; }

        bool operator == (const iterator& rhs) const {
            return this->element == rhs.element;
        }
        bool operator != (const iterator& rhs) const {
            return this->element != rhs.element;
        }

        bool operator < (const iterator& rhs) const {
            return this->element < rhs.element;
        }
        bool operator > (const iterator& rhs) const {
            return this->element > rhs.element;
        }
    };
};
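// Non-fragility sketch: iteration strides by entsize(), read from the list
// itself rather than sizeof(Element), so a list emitted by a compiler whose
// element struct is larger than ours still walks correctly. For example,
// with entsize() == 24 and count == 2:
//   &get(0) == (uint8_t *)&first + 0     // first element
//   &get(1) == (uint8_t *)&first + 24    // advance by entsize, not sizeof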
struct method_t {
    SEL name;
    const char *types;
    MethodListIMP imp;

    struct SortBySELAddress :
        public std::binary_function<const method_t&,
                                    const method_t&, bool>
    {
        bool operator() (const method_t& lhs,
                         const method_t& rhs)
        { return lhs.name < rhs.name; }
    };
};

struct ivar_t {
#if __x86_64__
    // *offset was originally 64-bit on some x86_64 platforms.
    // We read and write only 32 bits of it.
    // Some metadata provides all 64 bits. This is harmless for unsigned
    // little-endian values.
    // Some code uses all 64 bits. class_addIvar() over-allocates the
    // offset for their benefit.
#endif
    int32_t *offset;
    const char *name;
    const char *type;
    // alignment is sometimes -1; use alignment() instead
    uint32_t alignment_raw;
    uint32_t size;

    uint32_t alignment() const {
        if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
        return 1 << alignment_raw;
    }
};
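// alignment_raw is a log2 value: e.g. alignment_raw == 3 means 1<<3 ==
// 8-byte alignment. The ~0 sentinel (the "-1" above) means "use natural
// word alignment", i.e. 1 << WORD_SHIFT, which is 8 bytes on 64-bit.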
struct property_t {
    const char *name;
    const char *attributes;
};

// Two bits of entsize are used for fixup markers.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
    bool isUniqued() const;
    bool isFixedUp() const;
    void setFixedUp();

    uint32_t indexOfMethod(const method_t *meth) const {
        uint32_t i =
            (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
        ASSERT(i < count);
        return i;
    }
};
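// indexOfMethod divides the raw byte offset from the list header by
// entsize(), relying on the 8-byte header being smaller than one element:
// with entsize() == 24 (method_t on LP64), method 0 sits at byte offset 8
// and 8/24 == 0; method 1 sits at offset 32 and 32/24 == 1.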
struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
    bool containsIvar(Ivar ivar) const {
        return (ivar >= (Ivar)&*begin() && ivar < (Ivar)&*end());
    }
};

struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};
typedef uintptr_t protocol_ref_t;  // protocol_t *, but unremapped

// Values for protocol_t->flags
#define PROTOCOL_FIXED_UP_2     (1<<31)  // must never be set by compiler
#define PROTOCOL_FIXED_UP_1     (1<<30)  // must never be set by compiler
#define PROTOCOL_IS_CANONICAL   (1<<29)  // must never be set by compiler
// Bits 0..15 are reserved for Swift's use.

#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)

struct protocol_t : objc_object {
    const char *mangledName;
    struct protocol_list_t *protocols;
    method_list_t *instanceMethods;
    method_list_t *classMethods;
    method_list_t *optionalInstanceMethods;
    method_list_t *optionalClassMethods;
    property_list_t *instanceProperties;
    uint32_t size;   // sizeof(protocol_t)
    uint32_t flags;
    // Fields below this point are not always present on disk.
    const char **_extendedMethodTypes;
    const char *_demangledName;
    property_list_t *_classProperties;

    const char *demangledName();

    const char *nameForLogging() {
        return demangledName();
    }

    bool isFixedUp() const;
    void setFixedUp();

    bool isCanonical() const;
    void clearIsCanonical();

#   define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f))

    bool hasExtendedMethodTypesField() const {
        return HAS_FIELD(_extendedMethodTypes);
    }
    bool hasDemangledNameField() const {
        return HAS_FIELD(_demangledName);
    }
    bool hasClassPropertiesField() const {
        return HAS_FIELD(_classProperties);
    }

#   undef HAS_FIELD

    const char **extendedMethodTypes() const {
        return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil;
    }

    property_list_t *classProperties() const {
        return hasClassPropertiesField() ? _classProperties : nil;
    }
};
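// The size-based checks above make the trailing fields optional on disk:
// a protocol emitted before _classProperties existed records a smaller
// `size`, so HAS_FIELD(_classProperties) is false and classProperties()
// returns nil instead of reading past the end of the on-disk struct.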
struct protocol_list_t {
    // count is pointer-sized by accident.
    uintptr_t count;
    protocol_ref_t list[0];  // variable-size

    size_t byteSize() const {
        return sizeof(*this) + count*sizeof(list[0]);
    }

    protocol_list_t *duplicate() const {
        return (protocol_list_t *)memdup(this, this->byteSize());
    }

    typedef protocol_ref_t* iterator;
    typedef const protocol_ref_t* const_iterator;

    const_iterator begin() const {
        return list;
    }
    iterator begin() {
        return list;
    }
    const_iterator end() const {
        return list + count;
    }
    iterator end() {
        return list + count;
    }
};
struct class_ro_t {
    uint32_t flags;
    uint32_t instanceStart;
    uint32_t instanceSize;
#ifdef __LP64__
    uint32_t reserved;
#endif

    const uint8_t * ivarLayout;

    const char * name;
    method_list_t * baseMethodList;
    protocol_list_t * baseProtocols;
    const ivar_list_t * ivars;

    const uint8_t * weakIvarLayout;
    property_list_t *baseProperties;

    // This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
    _objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE[0];

    _objc_swiftMetadataInitializer swiftMetadataInitializer() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            return _swiftMetadataInitializer_NEVER_USE[0];
        } else {
            return nil;
        }
    }

    method_list_t *baseMethods() const {
        return baseMethodList;
    }

    class_ro_t *duplicate() const {
        if (flags & RO_HAS_SWIFT_INITIALIZER) {
            size_t size = sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE[0]);
            class_ro_t *ro = (class_ro_t *)memdup(this, size);
            ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0];
            return ro;
        } else {
            size_t size = sizeof(*this);
            class_ro_t *ro = (class_ro_t *)memdup(this, size);
            return ro;
        }
    }
};
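// _swiftMetadataInitializer_NEVER_USE is a zero-length trailing array: the
// function pointer is physically present only when RO_HAS_SWIFT_INITIALIZER
// is set, so duplicate() must size the copy accordingly. The explicit
// re-assignment after memdup() re-signs the __ptrauth-qualified pointer for
// its new address; an address-diversified signature does not survive a raw
// byte copy.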
/***********************************************************************
* list_array_tt<Element, List>
* Generic implementation for metadata that can be augmented by categories.
*
* Element is the underlying metadata type (e.g. method_t)
* List is the metadata's list type (e.g. method_list_t)
*
* A list_array_tt has one of three values:
* - empty
* - a pointer to a single list
* - an array of pointers to lists
*
* countLists/beginLists/endLists iterate the metadata lists
* count/begin/end iterate the underlying metadata elements
**********************************************************************/
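// Storage sketch for the three states (see the union below): the low bit
// of the word tags which representation is in use.
//   empty:      list == nil
//   one list:   list == L                            // low bit clear
//   many lists: arrayAndFlag == (uintptr_t)array | 1 // low bit set
// hasArray()/array()/setArray() below encapsulate the tagging.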
template <typename Element, typename List>
class list_array_tt {
    struct array_t {
        uint32_t count;
        List* lists[0];

        static size_t byteSize(uint32_t count) {
            return sizeof(array_t) + count*sizeof(lists[0]);
        }
        size_t byteSize() {
            return byteSize(count);
        }
    };

protected:
    class iterator {
        List **lists;
        List **listsEnd;
        typename List::iterator m, mEnd;

    public:
        iterator(List **begin, List **end)
            : lists(begin), listsEnd(end)
        {
            if (begin != end) {
                m = (*begin)->begin();
                mEnd = (*begin)->end();
            }
        }

        const Element& operator * () const {
            return *m;
        }
        Element& operator * () {
            return *m;
        }

        bool operator != (const iterator& rhs) const {
            if (lists != rhs.lists) return true;
            if (lists == listsEnd) return false;  // m is undefined
            if (m != rhs.m) return true;
            return false;
        }

        const iterator& operator ++ () {
            ASSERT(m != mEnd);
            m++;
            if (m == mEnd) {
                ASSERT(lists != listsEnd);
                lists++;
                if (lists != listsEnd) {
                    m = (*lists)->begin();
                    mEnd = (*lists)->end();
                }
            }
            return *this;
        }
    };

private:
    union {
        List* list;
        uintptr_t arrayAndFlag;
    };

    bool hasArray() const {
        return arrayAndFlag & 1;
    }

    array_t *array() {
        return (array_t *)(arrayAndFlag & ~1);
    }

    void setArray(array_t *array) {
        arrayAndFlag = (uintptr_t)array | 1;
    }

public:
    uint32_t count() {
        uint32_t result = 0;
        for (auto lists = beginLists(), end = endLists();
             lists != end;
             ++lists)
        {
            result += (*lists)->count;
        }
        return result;
    }

    iterator begin() {
        return iterator(beginLists(), endLists());
    }

    iterator end() {
        List **e = endLists();
        return iterator(e, e);
    }

    uint32_t countLists() {
        if (hasArray()) {
            return array()->count;
        } else if (list) {
            return 1;
        } else {
            return 0;
        }
    }

    List** beginLists() {
        if (hasArray()) {
            return array()->lists;
        } else {
            return &list;
        }
    }

    List** endLists() {
        if (hasArray()) {
            return array()->lists + array()->count;
        } else if (list) {
            return &list + 1;
        } else {
            return &list;
        }
    }

    void attachLists(List* const * addedLists, uint32_t addedCount) {
        if (addedCount == 0) return;

        if (hasArray()) {
            // many lists -> many lists
            uint32_t oldCount = array()->count;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
            array()->count = newCount;
            memmove(array()->lists + addedCount, array()->lists,
                    oldCount * sizeof(array()->lists[0]));
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
        else if (!list  &&  addedCount == 1) {
            // 0 lists -> 1 list
            list = addedLists[0];
        }
        else {
            // 1 list -> many lists
            List* oldList = list;
            uint32_t oldCount = oldList ? 1 : 0;
            uint32_t newCount = oldCount + addedCount;
            setArray((array_t *)malloc(array_t::byteSize(newCount)));
            array()->count = newCount;
            if (oldList) array()->lists[addedCount] = oldList;
            memcpy(array()->lists, addedLists,
                   addedCount * sizeof(array()->lists[0]));
        }
    }
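    // Ordering sketch: added lists always land at the front and existing
    // lists shift back, e.g. attaching {C} to {A, B} yields {C, A, B}.
    // Method lookup scans lists front to back, which is why a category's
    // implementation shadows the class's own (and later-attached categories
    // shadow earlier ones the same way).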
    void tryFree() {
        if (hasArray()) {
            for (uint32_t i = 0; i < array()->count; i++) {
                try_free(array()->lists[i]);
            }
            try_free(array());
        }
        else if (list) {
            try_free(list);
        }
    }

    template<typename Result>
    Result duplicate() {
        Result result;

        if (hasArray()) {
            array_t *a = array();
            result.setArray((array_t *)memdup(a, a->byteSize()));
            for (uint32_t i = 0; i < a->count; i++) {
                result.array()->lists[i] = a->lists[i]->duplicate();
            }
        } else if (list) {
            result.list = list->duplicate();
        } else {
            result.list = nil;
        }

        return result;
    }
};
class method_array_t :
    public list_array_tt<method_t, method_list_t>
{
    typedef list_array_tt<method_t, method_list_t> Super;

public:
    method_list_t **beginCategoryMethodLists() {
        return beginLists();
    }

    method_list_t **endCategoryMethodLists(Class cls);

    method_array_t duplicate() {
        return Super::duplicate<method_array_t>();
    }
};

class property_array_t :
    public list_array_tt<property_t, property_list_t>
{
    typedef list_array_tt<property_t, property_list_t> Super;

public:
    property_array_t duplicate() {
        return Super::duplicate<property_array_t>();
    }
};

class protocol_array_t :
    public list_array_tt<protocol_ref_t, protocol_list_t>
{
    typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;

public:
    protocol_array_t duplicate() {
        return Super::duplicate<protocol_array_t>();
    }
};
struct class_rw_t {
    // Be warned that Symbolication knows the layout of this structure.
    uint32_t flags;
    uint16_t version;
    uint16_t witness;

    const class_ro_t *ro;

    method_array_t methods;
    property_array_t properties;
    protocol_array_t protocols;

    Class firstSubclass;
    Class nextSiblingClass;

    char *demangledName;

#if SUPPORT_INDEXED_ISA
    uint32_t index;
#endif

    void setFlags(uint32_t set)
    {
        __c11_atomic_fetch_or((_Atomic(uint32_t) *)&flags, set, __ATOMIC_RELAXED);
    }

    void clearFlags(uint32_t clear)
    {
        __c11_atomic_fetch_and((_Atomic(uint32_t) *)&flags, ~clear, __ATOMIC_RELAXED);
    }

    // set and clear must not overlap
    void changeFlags(uint32_t set, uint32_t clear)
    {
        ASSERT((set & clear) == 0);

        uint32_t oldf, newf;
        do {
            oldf = flags;
            newf = (oldf | set) & ~clear;
        } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
    }
};
struct class_data_bits_t {
    friend objc_class;

    // Values are the FAST_ flags above.
    uintptr_t bits;

private:
    bool getBit(uintptr_t bit) const
    {
        return bits & bit;
    }

    // Atomically set the bits in `set` and clear the bits in `clear`.
    // set and clear must not overlap.
    void setAndClearBits(uintptr_t set, uintptr_t clear)
    {
        ASSERT((set & clear) == 0);
        uintptr_t oldBits;
        uintptr_t newBits;
        do {
            oldBits = LoadExclusive(&bits);
            newBits = (oldBits | set) & ~clear;
        } while (!StoreReleaseExclusive(&bits, oldBits, newBits));
    }

    void setBits(uintptr_t set) {
        __c11_atomic_fetch_or((_Atomic(uintptr_t) *)&bits, set, __ATOMIC_RELAXED);
    }

    void clearBits(uintptr_t clear) {
        __c11_atomic_fetch_and((_Atomic(uintptr_t) *)&bits, ~clear, __ATOMIC_RELAXED);
    }

public:
    class_rw_t* data() const {
        return (class_rw_t *)(bits & FAST_DATA_MASK);
    }
    void setData(class_rw_t *newData)
    {
        ASSERT(!data()  ||  (newData->flags & (RW_REALIZING | RW_FUTURE)));
        // Set during realization or construction only. No locking needed.
        // Use a store-release fence because there may be concurrent
        // readers of data and data's contents.
        uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
        atomic_thread_fence(memory_order_release);
        bits = newBits;
    }

    // Get the class's ro data, even in the presence of concurrent realization.
    // fixme this isn't really safe without a compiler barrier at least
    // and probably a memory barrier when realizeClass changes the data field
    const class_ro_t *safe_ro() {
        class_rw_t *maybe_rw = data();
        if (maybe_rw->flags & RW_REALIZED) {
            // maybe_rw is rw
            return maybe_rw->ro;
        } else {
            // maybe_rw is actually ro
            return (class_ro_t *)maybe_rw;
        }
    }
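    // safe_ro() works because bit 31 means RW_REALIZED in class_rw_t->flags
    // and RO_REALIZED in class_ro_t->flags, both at offset 0, and the
    // compiler never emits RO_REALIZED: an unrealized class's data pointer
    // is really a class_ro_t whose bit 31 is guaranteed clear.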
    void setClassArrayIndex(unsigned Idx) {
#if SUPPORT_INDEXED_ISA
        // 0 is unused as then we can rely on zero-initialisation from calloc.
        ASSERT(Idx > 0);
        data()->index = Idx;
#endif
    }

    unsigned classArrayIndex() {
#if SUPPORT_INDEXED_ISA
        return data()->index;
#else
        return 0;
#endif
    }

    bool isAnySwift() {
        return isSwiftStable() || isSwiftLegacy();
    }

    bool isSwiftStable() {
        return getBit(FAST_IS_SWIFT_STABLE);
    }
    void setIsSwiftStable() {
        setAndClearBits(FAST_IS_SWIFT_STABLE, FAST_IS_SWIFT_LEGACY);
    }

    bool isSwiftLegacy() {
        return getBit(FAST_IS_SWIFT_LEGACY);
    }
    void setIsSwiftLegacy() {
        setAndClearBits(FAST_IS_SWIFT_LEGACY, FAST_IS_SWIFT_STABLE);
    }

    // fixme remove this once the Swift runtime uses the stable bits
    bool isSwiftStable_ButAllowLegacyForNow() {
        return isAnySwift();
    }

    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        // This function is called on un-realized classes without
        // holding any locks.
        // Beware of races with other realizers.
        return safe_ro()->swiftMetadataInitializer();
    }
};
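// Packing sketch for `bits` on LP64 (illustrative): the class_rw_t* lives
// in FAST_DATA_MASK and the Swift/RR flags share the low bits that the
// pointer's 8-byte alignment leaves free:
//   bits = (uintptr_t)rw | FAST_IS_SWIFT_STABLE;
//   class_rw_t *same_rw = (class_rw_t *)(bits & FAST_DATA_MASK);  // flags stripped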
struct objc_class : objc_object {
    // Class ISA;
    Class superclass;
    cache_t cache;             // formerly cache pointer and vtable
    class_data_bits_t bits;    // class_rw_t * plus custom rr/alloc flags

    class_rw_t *data() const {
        return bits.data();
    }
    void setData(class_rw_t *newData) {
        bits.setData(newData);
    }

    void setInfo(uint32_t set) {
        ASSERT(isFuture()  ||  isRealized());
        data()->setFlags(set);
    }

    void clearInfo(uint32_t clear) {
        ASSERT(isFuture()  ||  isRealized());
        data()->clearFlags(clear);
    }

    // set and clear must not overlap
    void changeInfo(uint32_t set, uint32_t clear) {
        ASSERT(isFuture()  ||  isRealized());
        ASSERT((set & clear) == 0);
        data()->changeFlags(set, clear);
    }

#if FAST_HAS_DEFAULT_RR
    bool hasCustomRR() const {
        return !bits.getBit(FAST_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        bits.setBits(FAST_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        bits.clearBits(FAST_HAS_DEFAULT_RR);
    }
#else
    bool hasCustomRR() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_RR);
    }
    void setHasDefaultRR() {
        bits.data()->setFlags(RW_HAS_DEFAULT_RR);
    }
    void setHasCustomRR() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_RR);
    }
#endif

#if FAST_CACHE_HAS_DEFAULT_AWZ
    bool hasCustomAWZ() const {
        return !cache.getBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        cache.setBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        cache.clearBit(FAST_CACHE_HAS_DEFAULT_AWZ);
    }
#else
    bool hasCustomAWZ() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_AWZ);
    }
    void setHasDefaultAWZ() {
        bits.data()->setFlags(RW_HAS_DEFAULT_AWZ);
    }
    void setHasCustomAWZ() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_AWZ);
    }
#endif

#if FAST_CACHE_HAS_DEFAULT_CORE
    bool hasCustomCore() const {
        return !cache.getBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
    void setHasDefaultCore() {
        return cache.setBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
    void setHasCustomCore() {
        return cache.clearBit(FAST_CACHE_HAS_DEFAULT_CORE);
    }
#else
    bool hasCustomCore() const {
        return !(bits.data()->flags & RW_HAS_DEFAULT_CORE);
    }
    void setHasDefaultCore() {
        bits.data()->setFlags(RW_HAS_DEFAULT_CORE);
    }
    void setHasCustomCore() {
        bits.data()->clearFlags(RW_HAS_DEFAULT_CORE);
    }
#endif

#if FAST_CACHE_HAS_CXX_CTOR
    bool hasCxxCtor() {
        ASSERT(isRealized());
        return cache.getBit(FAST_CACHE_HAS_CXX_CTOR);
    }
    void setHasCxxCtor() {
        cache.setBit(FAST_CACHE_HAS_CXX_CTOR);
    }
#else
    bool hasCxxCtor() {
        ASSERT(isRealized());
        return bits.data()->flags & RW_HAS_CXX_CTOR;
    }
    void setHasCxxCtor() {
        bits.data()->setFlags(RW_HAS_CXX_CTOR);
    }
#endif

#if FAST_CACHE_HAS_CXX_DTOR
    bool hasCxxDtor() {
        ASSERT(isRealized());
        return cache.getBit(FAST_CACHE_HAS_CXX_DTOR);
    }
    void setHasCxxDtor() {
        cache.setBit(FAST_CACHE_HAS_CXX_DTOR);
    }
#else
    bool hasCxxDtor() {
        ASSERT(isRealized());
        return bits.data()->flags & RW_HAS_CXX_DTOR;
    }
    void setHasCxxDtor() {
        bits.data()->setFlags(RW_HAS_CXX_DTOR);
    }
#endif

#if FAST_CACHE_REQUIRES_RAW_ISA
    bool instancesRequireRawIsa() {
        return cache.getBit(FAST_CACHE_REQUIRES_RAW_ISA);
    }
    void setInstancesRequireRawIsa() {
        cache.setBit(FAST_CACHE_REQUIRES_RAW_ISA);
    }
#elif SUPPORT_NONPOINTER_ISA
    bool instancesRequireRawIsa() {
        return bits.data()->flags & RW_REQUIRES_RAW_ISA;
    }
    void setInstancesRequireRawIsa() {
        bits.data()->setFlags(RW_REQUIRES_RAW_ISA);
    }
#else
    bool instancesRequireRawIsa() {
        return true;
    }
    void setInstancesRequireRawIsa() {
        // nothing
    }
#endif
    void setInstancesRequireRawIsaRecursively(bool inherited = false);
    void printInstancesRequireRawIsa(bool inherited);

    bool canAllocNonpointer() {
        ASSERT(!isFuture());
        return !instancesRequireRawIsa();
    }

    bool isSwiftStable() {
        return bits.isSwiftStable();
    }

    bool isSwiftLegacy() {
        return bits.isSwiftLegacy();
    }

    bool isAnySwift() {
        return bits.isAnySwift();
    }

    bool isSwiftStable_ButAllowLegacyForNow() {
        return bits.isSwiftStable_ButAllowLegacyForNow();
    }

    bool isStubClass() const {
        uintptr_t isa = (uintptr_t)isaBits();
        return 1 <= isa  &&  isa < 16;
    }

    // Swift stable ABI built for old deployment targets looks weird.
    // The is-legacy bit is set for compatibility with old libobjc.
    // We are on a "new" deployment target so we need to rewrite that bit.
    // These stable-with-legacy-bit classes are distinguished from real
    // legacy classes using another bit in the Swift data
    // (ClassFlags::IsSwiftPreStableABI)
    bool isUnfixedBackwardDeployingStableSwift() {
        // Only classes marked as Swift legacy need apply.
        if (!bits.isSwiftLegacy()) return false;

        // Check the true legacy vs stable distinguisher.
        // The low bit of Swift's ClassFlags is SET for true legacy
        // and UNSET for stable pretending to be legacy.
        uint32_t swiftClassFlags = *(uint32_t *)(&bits + 1);
        bool isActuallySwiftLegacy = bool(swiftClassFlags & 1);
        return !isActuallySwiftLegacy;
    }

    void fixupBackwardDeployingStableSwift() {
        if (isUnfixedBackwardDeployingStableSwift()) {
            // Class really is stable Swift, pretending to be pre-stable.
            // Fix its lie.
            bits.setIsSwiftStable();
        }
    }

    _objc_swiftMetadataInitializer swiftMetadataInitializer() {
        return bits.swiftMetadataInitializer();
    }

    // Return YES if the class's ivars are managed by ARC,
    // or the class is MRC but has ARC-style weak ivars.
    bool hasAutomaticIvars() {
        return data()->ro->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC);
    }

    // Return YES if the class's ivars are managed by ARC.
    bool isARC() {
        return data()->ro->flags & RO_IS_ARC;
    }

    bool forbidsAssociatedObjects() {
        return (data()->flags & RW_FORBIDS_ASSOCIATED_OBJECTS);
    }

#if SUPPORT_NONPOINTER_ISA
    // Tracked in non-pointer isas; not tracked otherwise
#else
    bool instancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        ASSERT(isFuture()  ||  isRealized());
        return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
    }

    void setInstancesHaveAssociatedObjects() {
        // this may be an unrealized future class in the CF-bridged case
        ASSERT(isFuture()  ||  isRealized());
        setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
    }
#endif

    bool shouldGrowCache() {
        return true;
    }

    void setShouldGrowCache(bool) {
        // fixme good or bad for memory use?
    }

    bool isInitializing() {
        return getMeta()->data()->flags & RW_INITIALIZING;
    }

    void setInitializing() {
        ASSERT(!isMetaClass());
        ISA()->setInfo(RW_INITIALIZING);
    }

    bool isInitialized() {
        return getMeta()->data()->flags & RW_INITIALIZED;
    }

    void setInitialized();

    bool isLoadable() {
        ASSERT(isRealized());
        return true;  // any class registered for +load is definitely loadable
    }

    IMP getLoadMethod();

    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isRealized() const {
        return !isStubClass()  &&  (data()->flags & RW_REALIZED);
    }

    // Returns true if this is an unrealized future class.
    // Locking: To prevent concurrent realization, hold runtimeLock.
    bool isFuture() const {
        return data()->flags & RW_FUTURE;
    }

    bool isMetaClass() {
        ASSERT(this);
        ASSERT(isRealized());
#if FAST_CACHE_META
        return cache.getBit(FAST_CACHE_META);
#else
        return data()->ro->flags & RO_META;
#endif
    }

    // Like isMetaClass, but also valid on un-realized classes
    bool isMetaClassMaybeUnrealized() {
        return bits.safe_ro()->flags & RO_META;
    }

    // NOT identical to this->ISA when this is a metaclass
    Class getMeta() {
        if (isMetaClass()) return (Class)this;
        else return this->ISA();
    }

    bool isRootClass() {
        return superclass == nil;
    }
    bool isRootMetaclass() {
        return ISA() == (Class)this;
    }

    const char *mangledName() {
        // fixme can't assert locks here
        ASSERT(this);

        if (isRealized()  ||  isFuture()) {
            return data()->ro->name;
        } else {
            return ((const class_ro_t *)data())->name;
        }
    }

    const char *demangledName();
    const char *nameForLogging();

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceStart() const {
        ASSERT(isRealized());
        return data()->ro->instanceStart;
    }

    // Class's instance start rounded up to a pointer-size boundary.
    // This is used for ARC layout bitmaps.
    uint32_t alignedInstanceStart() const {
        return word_align(unalignedInstanceStart());
    }

    // May be unaligned depending on class's ivars.
    uint32_t unalignedInstanceSize() const {
        ASSERT(isRealized());
        return data()->ro->instanceSize;
    }

    // Class's ivar size rounded up to a pointer-size boundary.
    uint32_t alignedInstanceSize() const {
        return word_align(unalignedInstanceSize());
    }

    size_t instanceSize(size_t extraBytes) const {
        if (fastpath(cache.hasFastInstanceSize(extraBytes))) {
            return cache.fastInstanceSize(extraBytes);
        }

        size_t size = alignedInstanceSize() + extraBytes;
        // CF requires all objects be at least 16 bytes.
        if (size < 16) size = 16;
        return size;
    }
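    // Worked example: a class with only an isa ivar has
    // alignedInstanceSize() == 8 on LP64, so instanceSize(0) returns the
    // CF-mandated minimum of 16 rather than 8.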
    void setInstanceSize(uint32_t newSize) {
        ASSERT(isRealized());
        ASSERT(data()->flags & RW_REALIZING);
        if (newSize != data()->ro->instanceSize) {
            ASSERT(data()->flags & RW_COPIED_RO);
            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
        }
        cache.setFastInstanceSize(newSize);
    }

    void chooseClassArrayIndex();

    void setClassArrayIndex(unsigned Idx) {
        bits.setClassArrayIndex(Idx);
    }

    unsigned classArrayIndex() {
        return bits.classArrayIndex();
    }
};
struct swift_class_t : objc_class {
    uint32_t flags;
    uint32_t instanceAddressOffset;
    uint32_t instanceSize;
    uint16_t instanceAlignMask;
    uint16_t reserved;

    uint32_t classSize;
    uint32_t classAddressOffset;
    void *description;
    // ...

    void *baseAddress() {
        return (void *)((uint8_t *)this - classAddressOffset);
    }
};
struct category_t {
    const char *name;
    classref_t cls;
    struct method_list_t *instanceMethods;
    struct method_list_t *classMethods;
    struct protocol_list_t *protocols;
    struct property_list_t *instanceProperties;
    // Fields below this point are not always present on disk.
    struct property_list_t *_classProperties;

    method_list_t *methodsForMeta(bool isMeta) {
        if (isMeta) return classMethods;
        else return instanceMethods;
    }

    property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);

    protocol_list_t *protocolsForMeta(bool isMeta) {
        if (isMeta) return nullptr;
        else return protocols;
    }
};
struct objc_super2 {
    id receiver;
    Class current_class;
};

struct message_ref_t {
    IMP imp;
    SEL sel;
};

extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);

#endif