NSObject.mm 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383
  1. /*
  2. * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
  3. *
  4. * @APPLE_LICENSE_HEADER_START@
  5. *
  6. * This file contains Original Code and/or Modifications of Original Code
  7. * as defined in and that are subject to the Apple Public Source License
  8. * Version 2.0 (the 'License'). You may not use this file except in
  9. * compliance with the License. Please obtain a copy of the License at
  10. * http://www.opensource.apple.com/apsl/ and read it before using this
  11. * file.
  12. *
  13. * The Original Code and all software distributed under the License are
  14. * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  15. * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  16. * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  18. * Please see the License for the specific language governing rights and
  19. * limitations under the License.
  20. *
  21. * @APPLE_LICENSE_HEADER_END@
  22. */
  23. #include "objc-private.h"
  24. #include "NSObject.h"
  25. #include "objc-weak.h"
  26. #include "DenseMapExtras.h"
  27. #include <malloc/malloc.h>
  28. #include <stdint.h>
  29. #include <stdbool.h>
  30. #include <mach/mach.h>
  31. #include <mach-o/dyld.h>
  32. #include <mach-o/nlist.h>
  33. #include <sys/types.h>
  34. #include <sys/mman.h>
  35. #include <Block.h>
  36. #include <map>
  37. #include <execinfo.h>
  38. #include "NSObject-internal.h"
// Minimal forward declaration of NSInvocation. The runtime does not link
// Foundation, so only the single method used here (-selector) is declared.
@interface NSInvocation
- (SEL)selector;
@end
// Exported byte offsets of AutoreleasePoolPageData fields so external
// tools (debuggers, heap inspectors) can parse autorelease pool pages
// without hard-coding the structure layout.
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_magic_offset = __builtin_offsetof(AutoreleasePoolPageData, magic);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_next_offset = __builtin_offsetof(AutoreleasePoolPageData, next);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_thread_offset = __builtin_offsetof(AutoreleasePoolPageData, thread);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __builtin_offsetof(AutoreleasePoolPageData, parent);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset = __builtin_offsetof(AutoreleasePoolPageData, child);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset = __builtin_offsetof(AutoreleasePoolPageData, depth);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset = __builtin_offsetof(AutoreleasePoolPageData, hiwat);
  49. /***********************************************************************
  50. * Weak ivar support
  51. **********************************************************************/
// Default handler for a failed object allocation: halt the process with a
// diagnostic naming the class. Does not return (_objc_fatal aborts), which
// is why no return statement follows.
static id defaultBadAllocHandler(Class cls)
{
    _objc_fatal("attempt to allocate object of class '%s' failed",
                cls->nameForLogging());
}
// Currently installed bad-alloc handler. Starts as the fatal default above;
// replaceable via _objc_setBadAllocHandler().
id(*badAllocHandler)(Class) = &defaultBadAllocHandler;

// Invoke the installed bad-alloc handler for `cls`. The handler may return
// a substitute object, or never return at all (the default aborts).
id _objc_callBadAllocHandler(Class cls)
{
    // fixme add re-entrancy protection in case allocation fails inside handler
    return (*badAllocHandler)(cls);
}
// Install a replacement handler for failed allocations.
// NOTE(review): the store is not atomic — presumably callers install the
// handler once during startup, before allocation failures can race; confirm.
void _objc_setBadAllocHandler(id(*newHandler)(Class))
{
    badAllocHandler = newHandler;
}
namespace {

// Bit layout of an object's entry in the side-table refcount map.
// The order of these bits is important.
#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
#define SIDE_TABLE_DEALLOCATING      (1UL<<1)  // MSB-ward of weak bit
#define SIDE_TABLE_RC_ONE            (1UL<<2)  // MSB-ward of deallocating bit
#define SIDE_TABLE_RC_PINNED         (1UL<<(WORD_BITS-1))

#define SIDE_TABLE_RC_SHIFT 2
#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)

// Map-entry policy: an entry whose stored value has reached zero carries
// no information and may be purged from the map.
struct RefcountMapValuePurgeable {
    static inline bool isPurgeable(size_t x) {
        return x == 0;
    }
};

// RefcountMap disguises its pointers because we
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;

// Template parameters.
enum HaveOld { DontHaveOld = false, DoHaveOld = true };
enum HaveNew { DontHaveNew = false, DoHaveNew = true };

// One stripe of the global side tables: a spinlock guarding both the
// overflow refcount map and the weak-reference table for the objects
// that hash to this stripe.
struct SideTable {
    spinlock_t slock;
    RefcountMap refcnts;
    weak_table_t weak_table;

    SideTable() {
        memset(&weak_table, 0, sizeof(weak_table));
    }

    ~SideTable() {
        // Side tables live for the lifetime of the process.
        _objc_fatal("Do not delete SideTable.");
    }

    void lock() { slock.lock(); }
    void unlock() { slock.unlock(); }
    void forceReset() { slock.forceReset(); }

    // Address-ordered lock discipline for a pair of side tables.
    // The <HaveOld, HaveNew> template arguments select, at compile time,
    // which of the two tables actually needs locking.
    template<HaveOld, HaveNew>
    static void lockTwo(SideTable *lock1, SideTable *lock2);
    template<HaveOld, HaveNew>
    static void unlockTwo(SideTable *lock1, SideTable *lock2);
};

// Both tables in play: lock both, ordered by address, to avoid
// lock-ordering deadlocks (spinlock_t::lockTwo handles the ordering
// and the case where both pointers name the same stripe).
template<>
void SideTable::lockTwo<DoHaveOld, DoHaveNew>
    (SideTable *lock1, SideTable *lock2)
{
    spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
}

// Only the old value's table is in play.
template<>
void SideTable::lockTwo<DoHaveOld, DontHaveNew>
    (SideTable *lock1, SideTable *)
{
    lock1->lock();
}

// Only the new value's table is in play.
template<>
void SideTable::lockTwo<DontHaveOld, DoHaveNew>
    (SideTable *, SideTable *lock2)
{
    lock2->lock();
}

template<>
void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
    (SideTable *lock1, SideTable *lock2)
{
    spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
}

template<>
void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
    (SideTable *lock1, SideTable *)
{
    lock1->unlock();
}

template<>
void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
    (SideTable *, SideTable *lock2)
{
    lock2->unlock();
}

// Explicitly initialized (rather than a C++ static constructor) so the
// runtime controls when the tables come up during process start.
static objc::ExplicitInit<StripedMap<SideTable>> SideTablesMap;

static StripedMap<SideTable>& SideTables() {
    return SideTablesMap.get();
}

// anonymous namespace
};
// Whole-table lock management, used around fork() and lock-order
// bookkeeping. Each function forwards to the StripedMap of side tables.

// Acquire every side table stripe lock.
void SideTableLockAll() {
    SideTables().lockAll();
}

// Release every side table stripe lock.
void SideTableUnlockAll() {
    SideTables().unlockAll();
}

// Force every stripe lock back to its unlocked state (e.g. in the child
// after fork, where lock owners no longer exist).
void SideTableForceResetAll() {
    SideTables().forceResetAll();
}

// Register the relative ordering of the stripe locks themselves
// with the lock-order checker.
void SideTableDefineLockOrder() {
    SideTables().defineLockOrder();
}

// Declare that all side table locks are acquired before `newlock`.
void SideTableLocksPrecedeLock(const void *newlock) {
    SideTables().precedeLock(newlock);
}

// Declare that all side table locks are acquired after `oldlock`.
void SideTableLocksSucceedLock(const void *oldlock) {
    SideTables().succeedLock(oldlock);
}
  165. void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
  166. int i = 0;
  167. const void *newlock;
  168. while ((newlock = newlocks.getLock(i++))) {
  169. SideTables().precedeLock(newlock);
  170. }
  171. }
  172. void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
  173. int i = 0;
  174. const void *oldlock;
  175. while ((oldlock = oldlocks.getLock(i++))) {
  176. SideTables().succeedLock(oldlock);
  177. }
  178. }
  179. //
  180. // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
  181. //
// ARC entry point for retaining a block. Blocks are "retained" by copying:
// _Block_copy moves a stack block to the heap (or bumps the refcount of a
// heap block), so the result is safe to store beyond the current scope.
id objc_retainBlock(id x) {
    return (id)_Block_copy(x);
}
  185. //
  186. // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
  187. //
// Hook intended for compiler-emitted calls before deallocation.
// Always answers YES; `object` is deliberately unused.
BOOL objc_should_deallocate(id object) {
    return YES;
}
  191. id
  192. objc_retain_autorelease(id obj)
  193. {
  194. return objc_autorelease(objc_retain(obj));
  195. }
// ARC entry point for assignment to a __strong variable:
// retains the new value, stores it, then releases the previous value.
void
objc_storeStrong(id *location, id obj)
{
    id prev = *location;
    // Storing the value already present is a no-op.
    if (obj == prev) {
        return;
    }
    // Retain-before-release: releasing `prev` first could deallocate an
    // object that `obj` transitively keeps alive.
    objc_retain(obj);
    *location = obj;
    objc_release(prev);
}
  207. // Update a weak variable.
  208. // If HaveOld is true, the variable has an existing value
  209. // that needs to be cleaned up. This value might be nil.
  210. // If HaveNew is true, there is a new value that needs to be
  211. // assigned into the variable. This value might be nil.
  212. // If CrashIfDeallocating is true, the process is halted if newObj is
  213. // deallocating or newObj's class does not support weak references.
  214. // If CrashIfDeallocating is false, nil is stored instead.
// Policy parameter for storeWeak: what to do when the new object is
// deallocating or its class forbids weak references.
enum CrashIfDeallocating {
    DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
};

// Core weak-store primitive shared by objc_storeWeak, objc_initWeak,
// objc_destroyWeak, and friends.
// haveOld: *location holds an existing value (possibly nil) that must be
//          unregistered. haveNew: newObj (possibly nil) must be registered
//          and stored. At least one must be true.
// Returns the value actually stored (nil if the store was rejected).
template <HaveOld haveOld, HaveNew haveNew,
          CrashIfDeallocating crashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
    ASSERT(haveOld || haveNew);
    if (!haveNew) ASSERT(newObj == nil);

    Class previouslyInitializedClass = nil;
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems.
    // Retry if the old value changes underneath us.
 retry:
    if (haveOld) {
        oldObj = *location;
        oldTable = &SideTables()[oldObj];
    } else {
        oldTable = nil;
    }
    if (haveNew) {
        newTable = &SideTables()[newObj];
    } else {
        newTable = nil;
    }

    SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);

    // Another thread may have replaced *location between the unlocked read
    // above and taking the locks; if so, start over with the new value.
    if (haveOld && *location != oldObj) {
        SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
        goto retry;
    }

    // Prevent a deadlock between the weak reference machinery
    // and the +initialize machinery by ensuring that no
    // weakly-referenced object has an un-+initialized isa.
    if (haveNew && newObj) {
        Class cls = newObj->getIsa();
        if (cls != previouslyInitializedClass &&
            !((objc_class *)cls)->isInitialized())
        {
            // Must drop the side table locks before running +initialize.
            SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
            class_initialize(cls, (id)newObj);

            // If this class is finished with +initialize then we're good.
            // If this class is still running +initialize on this thread
            // (i.e. +initialize called storeWeak on an instance of itself)
            // then we may proceed but it will appear initializing and
            // not yet initialized to the check above.
            // Instead set previouslyInitializedClass to recognize it on retry.
            previouslyInitializedClass = cls;

            goto retry;
        }
    }

    // Clean up old value, if any.
    if (haveOld) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }

    // Assign new value, if any.
    if (haveNew) {
        newObj = (objc_object *)
            weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
                                  crashIfDeallocating);
        // weak_register_no_lock returns nil if weak store should be rejected

        // Set is-weakly-referenced bit in refcount table.
        if (newObj && !newObj->isTaggedPointer()) {
            newObj->setWeaklyReferenced_nolock();
        }

        // Do not set *location anywhere else. That would introduce a race.
        *location = (id)newObj;
    }
    else {
        // No new value. The storage is not changed.
    }

    SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);

    return (id)newObj;
}
  292. /**
  293. * This function stores a new value into a __weak variable. It would
  294. * be used anywhere a __weak variable is the target of an assignment.
  295. *
  296. * @param location The address of the weak pointer itself
  297. * @param newObj The new object this weak ptr should now point to
  298. *
  299. * @return \e newObj
  300. */
// Store `newObj` into the __weak variable at `location`, replacing any
// existing value. Halts if `newObj` is deallocating or its class does not
// support weak references. Returns `newObj`.
id
objc_storeWeak(id *location, id newObj)
{
    return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object *)newObj);
}
  307. /**
  308. * This function stores a new value into a __weak variable.
  309. * If the new object is deallocating or the new object's class
  310. * does not support weak references, stores nil instead.
  311. *
  312. * @param location The address of the weak pointer itself
  313. * @param newObj The new object this weak ptr should now point to
  314. *
  315. * @return The value stored (either the new object or nil)
  316. */
// Like objc_storeWeak, but stores nil instead of halting when `newObj` is
// deallocating or its class does not support weak references.
// Returns the value actually stored (the new object or nil).
id
objc_storeWeakOrNil(id *location, id newObj)
{
    return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
        (location, (objc_object *)newObj);
}
  323. /**
  324. * Initialize a fresh weak pointer to some object location.
  325. * It would be used for code like:
  326. *
  327. * (The nil case)
  328. * __weak id weakPtr;
  329. * (The non-nil case)
  330. * NSObject *o = ...;
  331. * __weak id weakPtr = o;
  332. *
  333. * This function IS NOT thread-safe with respect to concurrent
  334. * modifications to the weak variable. (Concurrent weak clear is safe.)
  335. *
  336. * @param location Address of __weak ptr.
  337. * @param newObj Object ptr.
  338. */
// Initialize a *fresh* (uninitialized) __weak variable to `newObj`.
// NOT thread-safe against concurrent writes to the same variable
// (concurrent weak clear is safe). The nil fast path skips the side
// tables entirely.
id
objc_initWeak(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }

    // DontHaveOld: the variable is uninitialized, so there is no previous
    // registration to clean up.
    return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object*)newObj);
}
// Like objc_initWeak, but stores nil instead of halting when `newObj` is
// deallocating or its class does not support weak references.
id
objc_initWeakOrNil(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
        (location, (objc_object*)newObj);
}
  359. /**
  360. * Destroys the relationship between a weak pointer
  361. * and the object it is referencing in the internal weak
  362. * table. If the weak pointer is not referencing anything,
  363. * there is no need to edit the weak table.
  364. *
  365. * This function IS NOT thread-safe with respect to concurrent
  366. * modifications to the weak variable. (Concurrent weak clear is safe.)
  367. *
  368. * @param location The weak pointer address.
  369. */
// Tear down the __weak variable at `location`: unregister it from the weak
// table without storing anything new. NOT thread-safe against concurrent
// writes to the same variable (concurrent weak clear is safe).
void
objc_destroyWeak(id *location)
{
    (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
        (location, nil);
}
  376. /*
  377. Once upon a time we eagerly cleared *location if we saw the object
  378. was deallocating. This confuses code like NSPointerFunctions which
  379. tries to pre-flight the raw storage and assumes if the storage is
  380. zero then the weak system is done interfering. That is false: the
  381. weak system is still going to check and clear the storage later.
  382. This can cause objc_weak_error complaints and crashes.
  383. So we now don't touch the storage until deallocation completes.
  384. */
// Load the object from a __weak variable and return it retained (+1), or
// nil if the variable is nil or the object is being deallocated.
// Takes the object's side table lock so the weak system cannot clear the
// variable mid-read; retries if the variable changes before the lock is
// acquired.
id
objc_loadWeakRetained(id *location)
{
    id obj;
    id result;
    Class cls;

    SideTable *table;

 retry:
    // fixme std::atomic this load
    obj = *location;
    if (!obj) return nil;
    // Tagged pointers are never deallocated and are not in the weak table.
    if (obj->isTaggedPointer()) return obj;

    table = &SideTables()[obj];

    table->lock();
    // The variable may have been rewritten between the unlocked read and
    // taking the lock; if so, start over.
    if (*location != obj) {
        table->unlock();
        goto retry;
    }

    result = obj;

    cls = obj->ISA();
    if (! cls->hasCustomRR()) {
        // Fast case. We know +initialize is complete because
        // default-RR can never be set before then.
        ASSERT(cls->isInitialized());
        if (! obj->rootTryRetain()) {
            result = nil;
        }
    }
    else {
        // Slow case. We must check for +initialize and call it outside
        // the lock if necessary in order to avoid deadlocks.
        if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
            // Ask the class's custom -retainWeakReference whether the
            // object may be handed out. A forwarded (unimplemented)
            // selector or a NO answer both yield nil.
            BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
                class_getMethodImplementation(cls, @selector(retainWeakReference));
            if ((IMP)tryRetain == _objc_msgForward) {
                result = nil;
            }
            else if (! (*tryRetain)(obj, @selector(retainWeakReference))) {
                result = nil;
            }
        }
        else {
            // +initialize must run without the side table lock held.
            table->unlock();
            class_initialize(cls, obj);
            goto retry;
        }
    }

    table->unlock();
    return result;
}
  435. /**
  436. * This loads the object referenced by a weak pointer and returns it, after
  437. * retaining and autoreleasing the object to ensure that it stays alive
  438. * long enough for the caller to use it. This function would be used
  439. * anywhere a __weak variable is used in an expression.
  440. *
  441. * @param location The weak pointer address
  442. *
  443. * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
  444. */
// Load the object from a __weak variable, retained and autoreleased so it
// survives long enough for the caller's expression. Returns nil if the
// variable is nil or the object is being deallocated.
id
objc_loadWeak(id *location)
{
    // Unlocked nil fast path; objc_loadWeakRetained rechecks under the lock.
    if (!*location) return nil;
    return objc_autorelease(objc_loadWeakRetained(location));
}
  451. /**
  452. * This function copies a weak pointer from one location to another,
  453. * when the destination doesn't already contain a weak pointer. It
  454. * would be used for code like:
  455. *
  456. * __weak id src = ...;
  457. * __weak id dst = src;
  458. *
  459. * This function IS NOT thread-safe with respect to concurrent
  460. * modifications to the destination variable. (Concurrent weak clear is safe.)
  461. *
  462. * @param dst The destination variable.
  463. * @param src The source variable.
  464. */
// Copy a weak pointer into an *uninitialized* destination.
// NOT thread-safe against concurrent writes to `dst` (concurrent weak
// clear is safe). Loading retained first pins the object so it cannot be
// deallocated between the load and the registration of `dst`.
void
objc_copyWeak(id *dst, id *src)
{
    id obj = objc_loadWeakRetained(src);
    objc_initWeak(dst, obj);
    objc_release(obj);
}
  472. /**
  473. * Move a weak pointer from one location to another.
  474. * Before the move, the destination must be uninitialized.
  475. * After the move, the source is nil.
  476. *
  477. * This function IS NOT thread-safe with respect to concurrent
  478. * modifications to either weak variable. (Concurrent weak clear is safe.)
  479. *
  480. */
// Move a weak pointer: after the call `dst` refers to the object and `src`
// is nil. `dst` must be uninitialized beforehand. NOT thread-safe against
// concurrent writes to either variable (concurrent weak clear is safe).
void
objc_moveWeak(id *dst, id *src)
{
    objc_copyWeak(dst, src);
    objc_destroyWeak(src);
    // destroyWeak unregisters but does not clear storage; nil it here so
    // the source reads as empty.
    *src = nil;
}
  488. /***********************************************************************
  489. Autorelease pool implementation
  490. A thread's autorelease pool is a stack of pointers.
  491. Each pointer is either an object to release, or POOL_BOUNDARY which is
  492. an autorelease pool boundary.
  493. A pool token is a pointer to the POOL_BOUNDARY for that pool. When
  494. the pool is popped, every object hotter than the sentinel is released.
  495. The stack is divided into a doubly-linked list of pages. Pages are added
  496. and deleted as necessary.
  497. Thread-local storage points to the hot page, where newly autoreleased
  498. objects are stored.
  499. **********************************************************************/
  500. BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
  501. BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
// One page of a thread's autorelease pool stack. Pages form a
// doubly-linked list (parent/child); the header fields come from
// AutoreleasePoolPageData and the remainder of the page holds the
// object pointers.
class AutoreleasePoolPage : private AutoreleasePoolPageData
{
    friend struct thread_data_t;

public:
    static size_t const SIZE =
#if PROTECT_AUTORELEASEPOOL
        PAGE_MAX_SIZE;  // must be multiple of vm page size
#else
        PAGE_MIN_SIZE;  // size and alignment, power of 2
#endif

private:
    // TLS slot holding the current thread's hot page.
    static pthread_key_t const key = AUTORELEASE_POOL_KEY;
    static uint8_t const SCRIBBLE = 0xA3;  // 0xA3A3A3A3 after releasing
    // Capacity in pointers (header included in the count's denominator).
    static size_t const COUNT = SIZE / sizeof(id);

    // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
    // pushed and it has never contained any objects. This saves memory
    // when the top level (i.e. libdispatch) pushes and pops pools but
    // never uses them.
#   define EMPTY_POOL_PLACEHOLDER ((id*)1)

    // Sentinel written into the stack to mark a pool boundary.
#   define POOL_BOUNDARY nil

    // SIZE-sizeof(*this) bytes of contents follow
    // Pages are SIZE-aligned so pageForPointer() can mask any interior
    // pointer down to its page; hence memalign rather than plain malloc.
    static void * operator new(size_t size) {
        return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
    }
    static void operator delete(void * p) {
        return free(p);
    }
    // When PROTECT_AUTORELEASEPOOL is set, pages are kept read-only except
    // while being mutated, to catch stray writes. Otherwise both are no-ops.
    inline void protect() {
#if PROTECT_AUTORELEASEPOOL
        mprotect(this, SIZE, PROT_READ);
        check();
#endif
    }

    inline void unprotect() {
#if PROTECT_AUTORELEASEPOOL
        check();
        mprotect(this, SIZE, PROT_READ | PROT_WRITE);
#endif
    }
    // Construct a page owned by the current thread and link it as the
    // child of `newParent` (nil for the first page). Depth and the
    // high-water mark carry over from the parent.
    AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
        AutoreleasePoolPageData(begin(),
                                objc_thread_self(),
                                newParent,
                                newParent ? 1+newParent->depth : 0,
                                newParent ? newParent->hiwat : 0)
    {
        if (parent) {
            parent->check();
            // A parent may have only one child; we always append at the end.
            ASSERT(!parent->child);
            parent->unprotect();
            parent->child = this;
            parent->protect();
        }
        protect();
    }

    ~AutoreleasePoolPage()
    {
        check();
        unprotect();
        // Pages must be drained and unlinked before destruction.
        ASSERT(empty());

        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        ASSERT(!child);
    }
  557. ~AutoreleasePoolPage()
  558. {
  559. check();
  560. unprotect();
  561. ASSERT(empty());
  562. // Not recursive: we don't want to blow out the stack
  563. // if a thread accumulates a stupendous amount of garbage
  564. ASSERT(!child);
  565. }
    // Report a corrupted page header via `log` (either _objc_inform or
    // _objc_fatal). `right` is a freshly constructed magic_t, used as the
    // expected magic value for the diagnostic.
    template<typename Fn>
    void
    busted(Fn log) const
    {
        magic_t right;
        log("autorelease pool page %p corrupted\n"
            "  magic     0x%08x 0x%08x 0x%08x 0x%08x\n"
            "  should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
            "  pthread   %p\n"
            "  should be %p\n",
            this,
            magic.m[0], magic.m[1], magic.m[2], magic.m[3],
            right.m[0], right.m[1], right.m[2], right.m[3],
            this->thread, objc_thread_self());
    }

    // Fatal variant: log the corruption and abort. Kept out-of-line and
    // cold so the happy path in check()/fastcheck() stays small.
    __attribute__((noinline, cold, noreturn))
    void
    busted_die() const
    {
        busted(_objc_fatal);
        __builtin_unreachable();
    }
    // Validate the page header: magic bytes intact and page owned by the
    // current thread. `die` selects abort vs. log-and-continue.
    inline void
    check(bool die = true) const
    {
        if (!magic.check() || thread != objc_thread_self()) {
            if (die) {
                busted_die();
            } else {
                busted(_objc_inform);
            }
        }
    }

    // Cheaper check for hot paths: only the first magic word unless
    // CHECK_AUTORELEASEPOOL requests the full check.
    inline void
    fastcheck() const
    {
#if CHECK_AUTORELEASEPOOL
        check();
#else
        if (! magic.fastcheck()) {
            busted_die();
        }
#endif
    }
    // First object slot: immediately after the page header.
    id * begin() {
        return (id *) ((uint8_t *)this+sizeof(*this));
    }

    // One past the last slot: the end of the SIZE-byte page.
    id * end() {
        return (id *) ((uint8_t *)this+SIZE);
    }

    // `next` is the insertion cursor; it sits at begin() when empty
    // and at end() when full.
    bool empty() {
        return next == begin();
    }

    bool full() {
        return next == end();
    }

    bool lessThanHalfFull() {
        return (next - begin() < (end() - begin()) / 2);
    }
    // Push `obj` onto this page and return the slot it was stored in.
    // Caller guarantees the page is not full.
    id *add(id obj)
    {
        ASSERT(!full());
        unprotect();
        id *ret = next;  // faster than `return next-1` because of aliasing
        *next++ = obj;
        protect();
        return ret;
    }
    // Release every object on this page.
    void releaseAll()
    {
        releaseUntil(begin());
    }

    // Pop and release objects from the hot end of the pool stack down to
    // (but not including) slot `stop`, which may be on an earlier page.
    void releaseUntil(id *stop)
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        while (this->next != stop) {
            // Restart from hotPage() every time, in case -release
            // autoreleased more objects
            AutoreleasePoolPage *page = hotPage();

            // fixme I think this `while` can be `if`, but I can't prove it
            while (page->empty()) {
                page = page->parent;
                setHotPage(page);
            }

            page->unprotect();
            id obj = *--page->next;
            // Scribble over the vacated slot to catch use-after-pop.
            memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
            page->protect();

            // Pool boundaries are sentinels, not objects; skip them.
            if (obj != POOL_BOUNDARY) {
                objc_release(obj);
            }
        }

        setHotPage(this);

#if DEBUG
        // we expect any children to be completely empty
        for (AutoreleasePoolPage *page = child; page; page = page->child) {
            ASSERT(page->empty());
        }
#endif
    }
// Delete this page and every child page below it, youngest first,
// clearing each parent's child link along the way.
void kill()
{
    // Not recursive: we don't want to blow out the stack
    // if a thread accumulates a stupendous amount of garbage
    AutoreleasePoolPage *page = this;
    while (page->child) page = page->child;

    AutoreleasePoolPage *deathptr;
    do {
        deathptr = page;
        page = page->parent;
        if (page) {
            page->unprotect();
            page->child = nil;
            page->protect();
        }
        delete deathptr;
    } while (deathptr != this);
}
// Thread-local-storage destructor: drains and frees a dying thread's
// autorelease pool pages. `p` is the raw TLS value for this thread.
static void tls_dealloc(void *p)
{
    if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
        // No objects or pool pages to clean up here.
        return;
    }

    // reinstate TLS value while we work
    setHotPage((AutoreleasePoolPage *)p);

    if (AutoreleasePoolPage *page = coldPage()) {
        if (!page->empty()) objc_autoreleasePoolPop(page->begin());  // pop all of the pools
        if (slowpath(DebugMissingPools || DebugPoolAllocation)) {
            // pop() killed the pages already
        } else {
            page->kill();  // free all of the pages
        }
    }

    // clear TLS value so TLS destruction doesn't loop
    setHotPage(nil);
}
// Convenience overload: map an interior slot pointer to its owning page.
static AutoreleasePoolPage *pageForPointer(const void *p)
{
    return pageForPointer((uintptr_t)p);
}
// Map an interior pointer to its owning page by rounding down to the
// page's SIZE alignment. The pointer must lie past the page header
// (i.e. it points at an object slot, not into the header).
static AutoreleasePoolPage *pageForPointer(uintptr_t p)
{
    AutoreleasePoolPage *result;
    uintptr_t offset = p % SIZE;

    ASSERT(offset >= sizeof(AutoreleasePoolPage));

    result = (AutoreleasePoolPage *)(p - offset);
    result->fastcheck();

    return result;
}
// True when this thread's TLS holds the sentinel for an empty top-level pool.
static inline bool haveEmptyPoolPlaceholder()
{
    id *tls = (id *)tls_get_direct(key);
    return (tls == EMPTY_POOL_PLACEHOLDER);
}
// Install the empty-pool sentinel in TLS. Only valid when no pool page
// or placeholder is currently installed.
static inline id* setEmptyPoolPlaceholder()
{
    ASSERT(tls_get_direct(key) == nil);
    tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
    return EMPTY_POOL_PLACEHOLDER;
}
// The page currently accepting new autoreleased objects for this thread,
// or nil if there is none (including when only the placeholder is set).
static inline AutoreleasePoolPage *hotPage()
{
    AutoreleasePoolPage *result = (AutoreleasePoolPage *)
        tls_get_direct(key);
    if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
    if (result) result->fastcheck();
    return result;
}
// Record `page` as this thread's hot page (nil clears the TLS slot).
static inline void setHotPage(AutoreleasePoolPage *page)
{
    if (page) page->fastcheck();
    tls_set_direct(key, (void *)page);
}
  741. static inline AutoreleasePoolPage *coldPage()
  742. {
  743. AutoreleasePoolPage *result = hotPage();
  744. if (result) {
  745. while (result->parent) {
  746. result = result->parent;
  747. result->fastcheck();
  748. }
  749. }
  750. return result;
  751. }
  752. static inline id *autoreleaseFast(id obj)
  753. {
  754. AutoreleasePoolPage *page = hotPage();
  755. if (page && !page->full()) {
  756. return page->add(obj);
  757. } else if (page) {
  758. return autoreleaseFullPage(obj, page);
  759. } else {
  760. return autoreleaseNoPage(obj);
  761. }
  762. }
static __attribute__((noinline))
id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
{
    // The hot page is full.
    // Step to the next non-full page, adding a new page if necessary.
    // Then add the object to that page.
    ASSERT(page == hotPage());
    // Under OBJC_DEBUG_POOL_ALLOCATION this may be called with a non-full
    // page (see autoreleaseNewPage), hence the relaxed assertion.
    ASSERT(page->full()  ||  DebugPoolAllocation);

    do {
        if (page->child) page = page->child;
        else page = new AutoreleasePoolPage(page);
    } while (page->full());

    setHotPage(page);
    return page->add(obj);
}
static __attribute__((noinline))
id *autoreleaseNoPage(id obj)
{
    // "No page" could mean no pool has been pushed
    // or an empty placeholder pool has been pushed and has no contents yet
    ASSERT(!hotPage());

    bool pushExtraBoundary = false;
    if (haveEmptyPoolPlaceholder()) {
        // We are pushing a second pool over the empty placeholder pool
        // or pushing the first object into the empty placeholder pool.
        // Before doing that, push a pool boundary on behalf of the pool
        // that is currently represented by the empty placeholder.
        pushExtraBoundary = true;
    }
    else if (obj != POOL_BOUNDARY  &&  DebugMissingPools) {
        // We are pushing an object with no pool in place,
        // and no-pool debugging was requested by environment.
        _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
                     "autoreleased with no pool in place - "
                     "just leaking - break on "
                     "objc_autoreleaseNoPool() to debug",
                     objc_thread_self(), (void*)obj, object_getClassName(obj));
        objc_autoreleaseNoPool(obj);
        return nil;
    }
    else if (obj == POOL_BOUNDARY  &&  !DebugPoolAllocation) {
        // We are pushing a pool with no pool in place,
        // and alloc-per-pool debugging was not requested.
        // Install and return the empty pool placeholder.
        return setEmptyPoolPlaceholder();
    }

    // We are pushing an object or a non-placeholder'd pool.

    // Install the first page.
    AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
    setHotPage(page);

    // Push a boundary on behalf of the previously-placeholder'd pool.
    if (pushExtraBoundary) {
        page->add(POOL_BOUNDARY);
    }

    // Push the requested object or pool.
    return page->add(obj);
}
  820. static __attribute__((noinline))
  821. id *autoreleaseNewPage(id obj)
  822. {
  823. AutoreleasePoolPage *page = hotPage();
  824. if (page) return autoreleaseFullPage(obj, page);
  825. else return autoreleaseNoPage(obj);
  826. }
  827. public:
// Add obj to the current thread's autorelease pool and return it.
// obj must be non-nil and not a tagged pointer.
static inline id autorelease(id obj)
{
    ASSERT(obj);
    ASSERT(!obj->isTaggedPointer());
    id *dest __unused = autoreleaseFast(obj);
    ASSERT(!dest  ||  dest == EMPTY_POOL_PLACEHOLDER  ||  *dest == obj);
    return obj;
}
// Push a new autorelease pool. Returns an opaque token (a boundary slot
// or the placeholder) that must be passed to pop() later.
static inline void *push()
{
    id *dest;
    if (slowpath(DebugPoolAllocation)) {
        // Each autorelease pool starts on a new pool page.
        dest = autoreleaseNewPage(POOL_BOUNDARY);
    } else {
        dest = autoreleaseFast(POOL_BOUNDARY);
    }
    ASSERT(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
    return dest;
}
// Handle pop() of an invalid or already-freed token.
__attribute__((noinline, cold))
static void badPop(void *token)
{
    // Error. For bincompat purposes this is not
    // fatal in executables built with old SDKs.

    if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
        // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
        _objc_fatal
            ("Invalid or prematurely-freed autorelease pool %p.", token);
    }

    // Old SDK. Bad pop is warned once.
    static bool complained = false;
    if (!complained) {
        complained = true;
        _objc_inform_now_and_on_crash
            ("Invalid or prematurely-freed autorelease pool %p. "
             "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
             "Proceeding anyway because the app is old "
             "(SDK version " SDK_FORMAT "). Memory errors are likely.",
             token, FORMAT_SDK(sdkVersion()));
    }
    objc_autoreleasePoolInvalid(token);
}
// Core of pop(): release everything above `stop` on `page`, then prune
// empty child pages. The allowDebug template parameter compiles the
// debug-environment checks out of the fast (allowDebug=false) instantiation.
template<bool allowDebug>
static void
popPage(void *token, AutoreleasePoolPage *page, id *stop)
{
    if (allowDebug && PrintPoolHiwat) printHiwat();

    page->releaseUntil(stop);

    // memory: delete empty children
    if (allowDebug && DebugPoolAllocation && page->empty()) {
        // special case: delete everything during page-per-pool debugging
        AutoreleasePoolPage *parent = page->parent;
        page->kill();
        setHotPage(parent);
    } else if (allowDebug && DebugMissingPools && page->empty() && !page->parent) {
        // special case: delete everything for pop(top)
        // when debugging missing autorelease pools
        page->kill();
        setHotPage(nil);
    } else if (page->child) {
        // hysteresis: keep one empty child if page is more than half full
        if (page->lessThanHalfFull()) {
            page->child->kill();
        }
        else if (page->child->child) {
            page->child->child->kill();
        }
    }
}
// Out-of-line debug instantiation of popPage, kept cold so the common
// pop() path stays small.
__attribute__((noinline, cold))
static void
popPageDebug(void *token, AutoreleasePoolPage *page, id *stop)
{
    popPage<true>(token, page, stop);
}
// Pop the pool identified by `token` (as returned by push()), releasing
// all objects autoreleased since that push.
static inline void
pop(void *token)
{
    AutoreleasePoolPage *page;
    id *stop;
    if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
        // Popping the top-level placeholder pool.
        page = hotPage();
        if (!page) {
            // Pool was never used. Clear the placeholder.
            return setHotPage(nil);
        }
        // Pool was used. Pop its contents normally.
        // Pool pages remain allocated for re-use as usual.
        page = coldPage();
        token = page->begin();
    } else {
        page = pageForPointer(token);
    }

    stop = (id *)token;
    if (*stop != POOL_BOUNDARY) {
        if (stop == page->begin()  &&  !page->parent) {
            // Start of coldest page may correctly not be POOL_BOUNDARY:
            // 1. top-level pool is popped, leaving the cold page in place
            // 2. an object is autoreleased with no pool
        } else {
            // Error. For bincompat purposes this is not
            // fatal in executables built with old SDKs.
            return badPop(token);
        }
    }

    if (slowpath(PrintPoolHiwat || DebugPoolAllocation || DebugMissingPools)) {
        return popPageDebug(token, page, stop);
    }

    return popPage<false>(token, page, stop);
}
// One-time setup: register the TLS destructor that drains pools on
// thread exit. Called from arr_init().
static void init()
{
    int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
                                         AutoreleasePoolPage::tls_dealloc);
    ASSERT(r == 0);
}
// Debug dump of this page's contents via _objc_inform.
__attribute__((noinline, cold))
void print()
{
    _objc_inform("[%p]  ................  PAGE %s %s %s", this,
                 full() ? "(full)" : "",
                 this == hotPage() ? "(hot)" : "",
                 this == coldPage() ? "(cold)" : "");
    check(false);
    for (id *p = begin(); p < next; p++) {
        if (*p == POOL_BOUNDARY) {
            _objc_inform("[%p]  ################  POOL %p", p, p);
        } else {
            _objc_inform("[%p]  %#16lx  %s",
                         p, (unsigned long)*p, object_getClassName(*p));
        }
    }
}
// Debug dump of every pool page on the current thread, coldest first.
__attribute__((noinline, cold))
static void printAll()
{
    _objc_inform("##############");
    _objc_inform("AUTORELEASE POOLS for thread %p", objc_thread_self());

    AutoreleasePoolPage *page;
    ptrdiff_t objects = 0;
    for (page = coldPage(); page; page = page->child) {
        objects += page->next - page->begin();
    }
    _objc_inform("%llu releases pending.", (unsigned long long)objects);

    if (haveEmptyPoolPlaceholder()) {
        _objc_inform("[%p]  ................  PAGE (placeholder)",
                     EMPTY_POOL_PLACEHOLDER);
        _objc_inform("[%p]  ################  POOL (placeholder)",
                     EMPTY_POOL_PLACEHOLDER);
    }
    else {
        for (page = coldPage(); page; page = page->child) {
            page->print();
        }
    }

    _objc_inform("##############");
}
  987. __attribute__((noinline, cold))
  988. static void printHiwat()
  989. {
  990. // Check and propagate high water mark
  991. // Ignore high water marks under 256 to suppress noise.
  992. AutoreleasePoolPage *p = hotPage();
  993. uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
  994. if (mark > p->hiwat && mark > 256) {
  995. for( ; p; p = p->parent) {
  996. p->unprotect();
  997. p->hiwat = mark;
  998. p->protect();
  999. }
  1000. _objc_inform("POOL HIGHWATER: new high water mark of %u "
  1001. "pending releases for thread %p:",
  1002. mark, objc_thread_self());
  1003. void *stack[128];
  1004. int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
  1005. char **sym = backtrace_symbols(stack, count);
  1006. for (int i = 0; i < count; i++) {
  1007. _objc_inform("POOL HIGHWATER: %s", sym[i]);
  1008. }
  1009. free(sym);
  1010. }
  1011. }
  1012. #undef POOL_BOUNDARY
  1013. };
  1014. /***********************************************************************
  1015. * Slow paths for inline control
  1016. **********************************************************************/
  1017. #if SUPPORT_NONPOINTER_ISA
// Out-of-line slow path for rootRetain() when the inline retain count
// in the isa overflows: retries with handleOverflow=true.
NEVER_INLINE id
objc_object::rootRetain_overflow(bool tryRetain)
{
    return rootRetain(tryRetain, true);
}
// Out-of-line slow path for rootRelease() when the inline retain count
// in the isa underflows: retries with handleUnderflow=true.
NEVER_INLINE uintptr_t
objc_object::rootRelease_underflow(bool performDealloc)
{
    return rootRelease(performDealloc, true);
}
// Slow path of clearDeallocating()
// for objects with nonpointer isa
// that were ever weakly referenced
// or whose retain count ever overflowed to the side table.
// Clears weak references and the side-table refcount entry under the
// object's side-table lock.
NEVER_INLINE void
objc_object::clearDeallocating_slow()
{
    ASSERT(isa.nonpointer  &&  (isa.weakly_referenced || isa.has_sidetable_rc));

    SideTable& table = SideTables()[this];
    table.lock();
    if (isa.weakly_referenced) {
        weak_clear_no_lock(&table.weak_table, (id)this);
    }
    if (isa.has_sidetable_rc) {
        table.refcnts.erase(this);
    }
    table.unlock();
}
  1046. #endif
// Out-of-line tail of rootAutorelease(): adds this object to the
// current thread's autorelease pool. Never called on tagged pointers.
__attribute__((noinline,used))
id
objc_object::rootAutorelease2()
{
    ASSERT(!isTaggedPointer());
    return AutoreleasePoolPage::autorelease((id)this);
}
// Empty breakpoint hook for debuggers; called below on overrelease.
BREAKPOINT_FUNCTION(
    void objc_overrelease_during_dealloc_error(void)
);

// Report a release of an object that is already deallocating.
// Returns 0 so rootRelease() can tail-call it.
NEVER_INLINE uintptr_t
objc_object::overrelease_error()
{
    _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
    objc_overrelease_during_dealloc_error();
    return 0; // allow rootRelease() to tail-call this
}
  1064. /***********************************************************************
  1065. * Retain count operations for side table.
  1066. **********************************************************************/
  1067. #if DEBUG
// Used to assert that an object is not present in the side table.
// True if the object has either a refcount entry or a registered
// weak reference. DEBUG builds only.
bool
objc_object::sidetable_present()
{
    bool result = false;
    SideTable& table = SideTables()[this];

    table.lock();

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) result = true;

    if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;

    table.unlock();

    return result;
}
  1081. #endif
  1082. #if SUPPORT_NONPOINTER_ISA
// Acquire this object's side-table lock (pairs with sidetable_unlock).
void
objc_object::sidetable_lock()
{
    SideTable& table = SideTables()[this];
    table.lock();
}
// Release this object's side-table lock (pairs with sidetable_lock).
void
objc_object::sidetable_unlock()
{
    SideTable& table = SideTables()[this];
    table.unlock();
}
// Move the entire retain count to the side table,
// as well as isDeallocating and weaklyReferenced.
// Caller holds the side-table lock; the isa must already have been
// changed to a raw pointer.
void
objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
                                          bool isDeallocating,
                                          bool weaklyReferenced)
{
    ASSERT(!isa.nonpointer);        // should already be changed to raw pointer
    SideTable& table = SideTables()[this];

    size_t& refcntStorage = table.refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // not deallocating - that was in the isa
    ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    uintptr_t carry;
    size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    // On overflow, pin the count so it can never be decremented to zero.
    if (carry) refcnt = SIDE_TABLE_RC_PINNED;
    if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
    if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;

    refcntStorage = refcnt;
}
// Move some retain counts to the side table from the isa field.
// Returns true if the object is now pinned.
// Caller holds the side-table lock.
bool
objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
{
    ASSERT(isa.nonpointer);
    SideTable& table = SideTables()[this];

    size_t& refcntStorage = table.refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // isa-side bits should not be set here
    ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;

    uintptr_t carry;
    size_t newRefcnt =
        addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) {
        // Overflow: pin the count, preserving the flag bits.
        refcntStorage =
            SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
        return true;
    }
    else {
        refcntStorage = newRefcnt;
        return false;
    }
}
// Move some retain counts from the side table to the isa field.
// Returns the actual count subtracted, which may be less than the request.
// Caller holds the side-table lock.
size_t
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
    ASSERT(isa.nonpointer);
    SideTable& table = SideTables()[this];

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()  ||  it->second == 0) {
        // Side table retain count is zero. Can't borrow.
        return 0;
    }
    size_t oldRefcnt = it->second;

    // isa-side bits should not be set here
    ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
    ASSERT(oldRefcnt > newRefcnt);  // shouldn't underflow
    it->second = newRefcnt;
    return delta_rc;
}
// Read the side-table portion of the retain count (without flag bits).
// Caller holds the side-table lock.
size_t
objc_object::sidetable_getExtraRC_nolock()
{
    ASSERT(isa.nonpointer);
    SideTable& table = SideTables()[this];
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) return 0;
    else return it->second >> SIDE_TABLE_RC_SHIFT;
}
  1172. // SUPPORT_NONPOINTER_ISA
  1173. #endif
// Retain an object whose retain count lives entirely in the side table
// (raw-pointer isa). Pinned counts are left unchanged.
id
objc_object::sidetable_retain()
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    table.lock();
    size_t& refcntStorage = table.refcnts[this];
    if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
        refcntStorage += SIDE_TABLE_RC_ONE;
    }
    table.unlock();

    return (id)this;
}
// Try to retain a raw-isa object; fails (returns false) if the object
// is already deallocating. Runs with the lock already held by the
// caller — see the comment below.
bool
objc_object::sidetable_tryRetain()
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    // NO SPINLOCK HERE
    // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table.slock == 0) {
    //     _objc_fatal("Do not call -_tryRetain.");
    // }

    bool result = true;
    auto it = table.refcnts.try_emplace(this, SIDE_TABLE_RC_ONE);
    auto &refcnt = it.first->second;
    if (it.second) {
        // there was no entry
    } else if (refcnt & SIDE_TABLE_DEALLOCATING) {
        result = false;
    } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
        refcnt += SIDE_TABLE_RC_ONE;
    }

    return result;
}
// Retain count of a raw-isa object: 1 (implicit) plus the side-table count.
uintptr_t
objc_object::sidetable_retainCount()
{
    SideTable& table = SideTables()[this];

    size_t refcnt_result = 1;

    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        // this is valid for SIDE_TABLE_RC_PINNED too
        refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
    }
    table.unlock();
    return refcnt_result;
}
// True if this raw-isa object has the DEALLOCATING bit set in the side
// table. Runs with the lock already held by the caller — see below.
bool
objc_object::sidetable_isDeallocating()
{
    SideTable& table = SideTables()[this];

    // NO SPINLOCK HERE
    // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
    // which already acquired the lock on our behalf.


    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table.slock == 0) {
    //     _objc_fatal("Do not call -_isDeallocating.");
    // }

    RefcountMap::iterator it = table.refcnts.find(this);
    return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
}
// True if this raw-isa object has ever been weakly referenced
// (WEAKLY_REFERENCED bit in the side table).
bool
objc_object::sidetable_isWeaklyReferenced()
{
    bool result = false;

    SideTable& table = SideTables()[this];
    table.lock();

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
    }

    table.unlock();

    return result;
}
// Mark this raw-isa object as weakly referenced.
// Caller holds the side-table lock.
void
objc_object::sidetable_setWeaklyReferenced_nolock()
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif

    SideTable& table = SideTables()[this];

    table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
// rdar://20206767
// return uintptr_t instead of bool so that the various raw-isa
// -release paths all return zero in eax
// Release a raw-isa object. When the count reaches zero, sets the
// DEALLOCATING bit and (if performDealloc) sends -dealloc.
uintptr_t
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    bool do_dealloc = false;

    table.lock();
    // try_emplace: a missing entry means an implicit count of zero,
    // so this release drops it to deallocating.
    auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
    auto &refcnt = it.first->second;
    if (it.second) {
        do_dealloc = true;
    } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
        // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
        do_dealloc = true;
        refcnt |= SIDE_TABLE_DEALLOCATING;
    } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
        refcnt -= SIDE_TABLE_RC_ONE;
    }
    table.unlock();
    if (do_dealloc  &&  performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return do_dealloc;
}
// Final teardown for a raw-isa object: clear its weak references and
// remove its side-table refcount entry.
void
objc_object::sidetable_clearDeallocating()
{
    SideTable& table = SideTables()[this];

    // clear any weak table items
    // clear extra retain count and deallocating bit
    // (fixme warn or abort if extra retain count == 0 ?)
    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
            weak_clear_no_lock(&table.weak_table, (id)this);
        }
        table.refcnts.erase(it);
    }
    table.unlock();
}
  1311. /***********************************************************************
  1312. * Optimized retain/release/autorelease entrypoints
  1313. **********************************************************************/
  1314. #if __OBJC2__
// Public retain entry point. nil and tagged pointers are returned unchanged.
__attribute__((aligned(16), flatten, noinline))
id
objc_retain(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->retain();
}
// Public release entry point. nil and tagged pointers are no-ops.
__attribute__((aligned(16), flatten, noinline))
void
objc_release(id obj)
{
    if (!obj) return;
    if (obj->isTaggedPointer()) return;
    return obj->release();
}
// Public autorelease entry point. nil and tagged pointers are returned unchanged.
__attribute__((aligned(16), flatten, noinline))
id
objc_autorelease(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->autorelease();
}
  1339. // OBJC2
  1340. #else
  1341. // not OBJC2
// Legacy (non-OBJC2) path: plain message send; messaging nil is a no-op.
id objc_retain(id obj) { return [obj retain]; }
// Legacy (non-OBJC2) path: plain message send; messaging nil is a no-op.
void objc_release(id obj) { [obj release]; }
// Legacy (non-OBJC2) path: plain message send; messaging nil is a no-op.
id objc_autorelease(id obj) { return [obj autorelease]; }
  1345. #endif
  1346. /***********************************************************************
  1347. * Basic operations for root class implementations a.k.a. _objc_root*()
  1348. **********************************************************************/
// Root-class implementation of -_tryRetain. obj must be non-nil.
bool
_objc_rootTryRetain(id obj)
{
    ASSERT(obj);

    return obj->rootTryRetain();
}
// Root-class implementation of -_isDeallocating. obj must be non-nil.
bool
_objc_rootIsDeallocating(id obj)
{
    ASSERT(obj);

    return obj->rootIsDeallocating();
}
// Clear weak references and side-table state as part of dealloc.
// Tagged pointers have no such state and are ignored.
void
objc_clear_deallocating(id obj)
{
    ASSERT(obj);

    if (obj->isTaggedPointer()) return;
    obj->clearDeallocating();
}
// Release obj; returns true if the count hit zero and -dealloc should run.
bool
_objc_rootReleaseWasZero(id obj)
{
    ASSERT(obj);

    return obj->rootReleaseShouldDealloc();
}
// Root-class implementation of -autorelease. obj must be non-nil.
NEVER_INLINE id
_objc_rootAutorelease(id obj)
{
    ASSERT(obj);
    return obj->rootAutorelease();
}
// Root-class implementation of -retainCount. obj must be non-nil.
uintptr_t
_objc_rootRetainCount(id obj)
{
    ASSERT(obj);

    return obj->rootRetainCount();
}
// Root-class implementation of -retain. obj must be non-nil.
NEVER_INLINE id
_objc_rootRetain(id obj)
{
    ASSERT(obj);

    return obj->rootRetain();
}
// Root-class implementation of -release. obj must be non-nil.
NEVER_INLINE void
_objc_rootRelease(id obj)
{
    ASSERT(obj);

    obj->rootRelease();
}
// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
// shortcutting optimizations.
// checkNil: return nil for a nil class instead of crashing.
// allocWithZone: send allocWithZone: rather than alloc on the slow path.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
#if __OBJC2__
    if (slowpath(checkNil && !cls)) return nil;
    // Fast path: no custom +allocWithZone: anywhere in the hierarchy,
    // so the runtime's root implementation can be called directly.
    if (fastpath(!cls->ISA()->hasCustomAWZ())) {
        return _objc_rootAllocWithZone(cls, nil);
    }
#endif

    // No shortcuts available.
    if (allocWithZone) {
        return ((id(*)(id, SEL, struct _NSZone *))objc_msgSend)(cls, @selector(allocWithZone:), nil);
    }
    return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(alloc));
}
// Base class implementation of +alloc. cls is not nil.
// Calls [cls allocWithZone:nil].
id
_objc_rootAlloc(Class cls)
{
    return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
}
// Calls [cls alloc]. Compiler-emitted entry point; nil cls returns nil.
id
objc_alloc(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
}
// Calls [cls allocWithZone:nil]. Compiler-emitted entry point; nil cls returns nil.
id
objc_allocWithZone(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
}
// Calls [[cls alloc] init]. Compiler-emitted entry point; nil cls returns nil.
id
objc_alloc_init(Class cls)
{
    return [callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/) init];
}
// Calls [cls new]
// Fast path avoids the +new message send when the class has no custom
// core methods; otherwise falls through to a real message send.
id
objc_opt_new(Class cls)
{
#if __OBJC2__
    if (fastpath(cls  &&  !cls->ISA()->hasCustomCore())) {
        return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init];
    }
#endif
    return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
}
// Calls [obj self]
// Fast path returns obj directly for nil, tagged pointers, and classes
// with no custom core methods.
id
objc_opt_self(id obj)
{
#if __OBJC2__
    if (fastpath(!obj || obj->isTaggedPointer() || !obj->ISA()->hasCustomCore())) {
        return obj;
    }
#endif
    return ((id(*)(id, SEL))objc_msgSend)(obj, @selector(self));
}
// Calls [obj class]
// Fast path reads the isa directly when the class has no custom core
// methods. A metaclass isa means obj is itself a class, so obj is returned.
Class
objc_opt_class(id obj)
{
#if __OBJC2__
    if (slowpath(!obj)) return nil;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        return cls->isMetaClass() ? obj : cls;
    }
#endif
    return ((Class(*)(id, SEL))objc_msgSend)(obj, @selector(class));
}
// Calls [obj isKindOfClass]
// Fast path walks the superclass chain directly when the class has no
// custom core methods. nil obj returns NO.
BOOL
objc_opt_isKindOfClass(id obj, Class otherClass)
{
#if __OBJC2__
    if (slowpath(!obj)) return NO;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        for (Class tcls = cls; tcls; tcls = tcls->superclass) {
            if (tcls == otherClass) return YES;
        }
        return NO;
    }
#endif
    return ((BOOL(*)(id, SEL, Class))objc_msgSend)(obj, @selector(isKindOfClass:), otherClass);
}
// Calls [obj respondsToSelector]
// Fast path queries the method lists directly when the class has no
// custom core methods. nil obj returns NO.
BOOL
objc_opt_respondsToSelector(id obj, SEL sel)
{
#if __OBJC2__
    if (slowpath(!obj)) return NO;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        return class_respondsToSelector_inst(obj, sel, cls);
    }
#endif
    return ((BOOL(*)(id, SEL, SEL))objc_msgSend)(obj, @selector(respondsToSelector:), sel);
}
// Root-class implementation of -dealloc. obj must be non-nil.
void
_objc_rootDealloc(id obj)
{
    ASSERT(obj);

    obj->rootDealloc();
}
// -finalize is a garbage-collection-only method; GC is unsupported here,
// so reaching this is always fatal.
void
_objc_rootFinalize(id obj __unused)
{
    ASSERT(obj);
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}
// Root-class implementation of -init: returns obj unchanged.
id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}
// Root-class implementation of -zone.
malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}
// Root-class implementation of -hash: identity hash (the pointer value).
uintptr_t
_objc_rootHash(id obj)
{
    return (uintptr_t)obj;
}
// Public pool-push entry point (used by @autoreleasepool).
void *
objc_autoreleasePoolPush(void)
{
    return AutoreleasePoolPage::push();
}
// Public pool-pop entry point (used by @autoreleasepool).
// ctxt is the token returned by objc_autoreleasePoolPush().
NEVER_INLINE
void
objc_autoreleasePoolPop(void *ctxt)
{
    AutoreleasePoolPage::pop(ctxt);
}
// Legacy-named wrapper around objc_autoreleasePoolPush().
void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}
// Legacy-named wrapper around objc_autoreleasePoolPop().
void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}
// Debug helper: dump the current thread's autorelease pools to the log.
void
_objc_autoreleasePoolPrint(void)
{
    AutoreleasePoolPage::printAll();
}
// Same as objc_release but suitable for tail-calling
// if you need the value back and don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_releaseAndReturn(id obj)
{
    objc_release(obj);
    return obj;
}
// Same as objc_retainAutorelease but suitable for tail-calling
// if you don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_retainAutoreleaseAndReturn(id obj)
{
    return objc_retainAutorelease(obj);
}
// Prepare a value at +1 for return through a +0 autoreleasing convention.
// If the caller cooperates (return-value optimization handshake), the
// autorelease is skipped entirely.
id
objc_autoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;

    return objc_autorelease(obj);
}
// Prepare a value at +0 for return through a +0 autoreleasing convention.
// If the caller cooperates, both the retain and autorelease are skipped.
id
objc_retainAutoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;

    // not objc_autoreleaseReturnValue(objc_retain(obj))
    // because we don't need another optimization attempt
    return objc_retainAutoreleaseAndReturn(obj);
}
// Accept a value returned through a +0 autoreleasing convention for use at +1.
// Caller-side half of the handshake: if the callee deposited the value at +1
// (acceptOptimizedReturn reports ReturnAtPlus1), take it as-is; otherwise
// retain to reach +1.
id
objc_retainAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;

    return objc_retain(obj);
}
// Accept a value returned through a +0 autoreleasing convention for use at +0.
// If the callee handed us +0 directly, nothing to do; if it handed us +1
// (handshake succeeded at +1), release the extra retain. "unsafe" because
// the result is not retained by the caller and may be deallocated out from
// under it if nothing else keeps it alive.
id
objc_unsafeClaimAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;

    return objc_releaseAndReturn(obj);
}
// Retain then autorelease: net effect is a +0 value guaranteed to survive
// until the enclosing autorelease pool drains.
id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}
// Trampoline with a dispatch-compatible (void *) signature that sends
// -dealloc to the object passed as context. Presumably used to forward
// deallocation onto the main thread for classes that require it — the
// dispatch call site is elsewhere; TODO confirm against the caller.
void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    [obj dealloc];
}
// Ownership-bridging shims between objc_objectptr_t and id.
// All three are plain casts; the difference is purely in the ownership
// contract advertised to ARC/callers.

// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object) { return object; }
// One-time initialization of the ARR (automatic reference counting runtime)
// subsystems: autorelease pools, side tables (refcounts/weak refs), and
// associated objects. Call order may be significant — do not reorder.
void arr_init(void)
{
    AutoreleasePoolPage::init();
    SideTablesMap.init();
    _objc_associations_init();
}
#if SUPPORT_TAGGED_POINTERS

// Placeholder for old debuggers. When they inspect an
// extended tagged pointer object they will see this isa.
@interface __NSUnrecognizedTaggedPointer : NSObject
@end

__attribute__((objc_nonlazy_class))
@implementation __NSUnrecognizedTaggedPointer
// Tagged pointers are immortal: retain/release/autorelease are no-ops.
-(id) retain { return self; }
-(oneway void) release { }
-(id) autorelease { return self; }
@end

#endif
// Root-class implementation. Most methods here are the minimal runtime
// behavior; several are documented below as being replaced at runtime by
// CoreFoundation, ObjectAlloc, or NSZombies.
__attribute__((objc_nonlazy_class))
@implementation NSObject

+ (void)initialize {
}

// For a class object, +self returns the class itself.
+ (id)self {
    return (id)self;
}

- (id)self {
    return self;
}

// +class on a class returns that class; -class returns the object's
// dynamic class (not the isa-visible class of a proxy-free read — it goes
// through object_getClass).
+ (Class)class {
    return self;
}

- (Class)class {
    return object_getClass(self);
}

+ (Class)superclass {
    return self->superclass;
}

- (Class)superclass {
    return [self class]->superclass;
}

// Membership/kind checks. The class variants treat the class object as an
// instance of its metaclass (hence self->ISA()).
+ (BOOL)isMemberOfClass:(Class)cls {
    return self->ISA() == cls;
}

- (BOOL)isMemberOfClass:(Class)cls {
    return [self class] == cls;
}

+ (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = self->ISA(); tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

- (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isSubclassOfClass:(Class)cls {
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (tcls == cls) return YES;
    }
    return NO;
}

// YES if the receiver class appears anywhere in obj's class hierarchy.
+ (BOOL)isAncestorOfObject:(NSObject *)obj {
    for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
        if (tcls == self) return YES;
    }
    return NO;
}

// Responds-to checks are delegated to class_respondsToSelector_inst
// (defined elsewhere in the runtime).
+ (BOOL)instancesRespondToSelector:(SEL)sel {
    return class_respondsToSelector_inst(nil, sel, self);
}

+ (BOOL)respondsToSelector:(SEL)sel {
    return class_respondsToSelector_inst(self, sel, self->ISA());
}

- (BOOL)respondsToSelector:(SEL)sel {
    return class_respondsToSelector_inst(self, sel, [self class]);
}

// Protocol conformance walks the superclass chain; nil protocol is NO.
+ (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = self; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

- (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

// Identity hash (pointer value) matching the pointer-equality -isEqual:.
+ (NSUInteger)hash {
    return _objc_rootHash(self);
}

- (NSUInteger)hash {
    return _objc_rootHash(self);
}

+ (BOOL)isEqual:(id)obj {
    return obj == (id)self;
}

- (BOOL)isEqual:(id)obj {
    return obj == self;
}

+ (BOOL)isFault {
    return NO;
}

- (BOOL)isFault {
    return NO;
}

+ (BOOL)isProxy {
    return NO;
}

- (BOOL)isProxy {
    return NO;
}

// IMP lookup. A nil selector is a programmer error and raises via
// doesNotRecognizeSelector:.
+ (IMP)instanceMethodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(self, sel);
}

+ (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation((id)self, sel);
}

- (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation(self, sel);
}

// Dynamic method resolution hooks; default is "not resolved".
+ (BOOL)resolveClassMethod:(SEL)sel {
    return NO;
}

+ (BOOL)resolveInstanceMethod:(SEL)sel {
    return NO;
}

// Replaced by CF (throws an NSException)
// NOTE(review): the "+" variant also says "sent to instance"; a class
// object is an instance of its metaclass, so the wording is deliberate.
+ (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
                class_getName(self), sel_getName(sel), self);
}

// Replaced by CF (throws an NSException)
- (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
                object_getClassName(self), sel_getName(sel), self);
}

// performSelector family: cast objc_msgSend to the appropriate function
// type and dispatch directly. nil selector raises.
+ (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
}

- (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)(self, sel);
}

- (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
}

- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("-[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Default forwarding terminates in doesNotRecognizeSelector:.
+ (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

- (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

+ (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

- (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

// Replaced by CF (returns an NSString)
+ (NSString *)description {
    return nil;
}

// Replaced by CF (returns an NSString)
- (NSString *)description {
    return nil;
}

+ (NSString *)debugDescription {
    return [self description];
}

- (NSString *)debugDescription {
    return [self description];
}

// +new is alloc+init via the runtime's callAlloc fast path (defined
// elsewhere in this file; false disables the nil-class check).
+ (id)new {
    return [callAlloc(self, false/*checkNil*/) init];
}

// Class objects are immortal: retain/release/autorelease/_tryRetain are
// no-ops, retainCount is pinned at ULONG_MAX.
+ (id)retain {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)retain {
    return _objc_rootRetain(self);
}

+ (BOOL)_tryRetain {
    return YES;
}

// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
    return _objc_rootTryRetain(self);
}

+ (BOOL)_isDeallocating {
    return NO;
}

- (BOOL)_isDeallocating {
    return _objc_rootIsDeallocating(self);
}

+ (BOOL)allowsWeakReference {
    return YES;
}

+ (BOOL)retainWeakReference {
    return YES;
}

// Weak references are permitted only while the object is not deallocating,
// and loading one must successfully try-retain it.
- (BOOL)allowsWeakReference {
    return ! [self _isDeallocating];
}

- (BOOL)retainWeakReference {
    return [self _tryRetain];
}

+ (oneway void)release {
}

// Replaced by ObjectAlloc
- (oneway void)release {
    _objc_rootRelease(self);
}

+ (id)autorelease {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)autorelease {
    return _objc_rootAutorelease(self);
}

+ (NSUInteger)retainCount {
    return ULONG_MAX;
}

- (NSUInteger)retainCount {
    return _objc_rootRetainCount(self);
}

+ (id)alloc {
    return _objc_rootAlloc(self);
}

// Replaced by ObjectAlloc
+ (id)allocWithZone:(struct _NSZone *)zone {
    return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}

// Replaced by CF (throws an NSException)
+ (id)init {
    return (id)self;
}

- (id)init {
    return _objc_rootInit(self);
}

// Replaced by CF (throws an NSException)
+ (void)dealloc {
}

// Replaced by NSZombies
- (void)dealloc {
    _objc_rootDealloc(self);
}

// Previously used by GC. Now a placeholder for binary compatibility.
- (void) finalize {
}

+ (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

- (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

// Copying a class object returns the class itself; instance -copy and
// -mutableCopy delegate to the zone variants (zone is ignored under ObjC2).
+ (id)copy {
    return (id)self;
}

+ (id)copyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)copy {
    return [(id)self copyWithZone:nil];
}

+ (id)mutableCopy {
    return (id)self;
}

+ (id)mutableCopyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)mutableCopy {
    return [(id)self mutableCopyWithZone:nil];
}

@end