NSObject.mm — Objective-C runtime root-class support: weak-reference side tables and the autorelease-pool implementation. (Extraction artifact removed here: a concatenated line-number gutter from the source browser.)
  1. /*
  2. * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
  3. *
  4. * @APPLE_LICENSE_HEADER_START@
  5. *
  6. * This file contains Original Code and/or Modifications of Original Code
  7. * as defined in and that are subject to the Apple Public Source License
  8. * Version 2.0 (the 'License'). You may not use this file except in
  9. * compliance with the License. Please obtain a copy of the License at
  10. * http://www.opensource.apple.com/apsl/ and read it before using this
  11. * file.
  12. *
  13. * The Original Code and all software distributed under the License are
  14. * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  15. * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  16. * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  18. * Please see the License for the specific language governing rights and
  19. * limitations under the License.
  20. *
  21. * @APPLE_LICENSE_HEADER_END@
  22. */
  23. #include "objc-private.h"
  24. #include "NSObject.h"
  25. #include "objc-weak.h"
  26. #include "llvm-DenseMap.h"
  27. #include "NSObject.h"
  28. #include <malloc/malloc.h>
  29. #include <stdint.h>
  30. #include <stdbool.h>
  31. #include <mach/mach.h>
  32. #include <mach-o/dyld.h>
  33. #include <mach-o/nlist.h>
  34. #include <sys/types.h>
  35. #include <sys/mman.h>
  36. #include <libkern/OSAtomic.h>
  37. #include <Block.h>
  38. #include <map>
  39. #include <execinfo.h>
  40. @interface NSInvocation
  41. - (SEL)selector;
  42. @end
  43. /***********************************************************************
  44. * Weak ivar support
  45. **********************************************************************/
// Default bad-alloc handler: halts the process with a diagnostic.
// It never actually returns (_objc_fatal aborts); the `id` return type
// only exists to match the installable handler signature.
  46. static id defaultBadAllocHandler(Class cls)
  47. {
  48. _objc_fatal("attempt to allocate object of class '%s' failed",
  49. cls->nameForLogging());
  50. }
// Currently-installed handler called when instance allocation fails.
  51. static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
// Invoke the installed bad-alloc handler for class `cls` and return
// whatever replacement object (if any) the handler produces.
  52. static id callBadAllocHandler(Class cls)
  53. {
  54. // fixme add re-entrancy protection in case allocation fails inside handler
  55. return (*badAllocHandler)(cls);
  56. }
// Install a replacement bad-alloc handler.
// NOTE(review): the store is unsynchronized — presumably intended to be
// called once during early startup; confirm against callers.
  57. void _objc_setBadAllocHandler(id(*newHandler)(Class))
  58. {
  59. badAllocHandler = newHandler;
  60. }
  61. namespace {
  62. // The order of these bits is important.
// Side-table refcount word layout (low bits to high):
//   bit 0: object is weakly referenced
//   bit 1: object is deallocating
//   bits 2..WORD_BITS-2: extra retain count (hence RC_SHIFT == 2)
//   top bit: retain count is pinned (overflowed and no longer tracked here)
  63. #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
  64. #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
  65. #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
  66. #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
  67. #define SIDE_TABLE_RC_SHIFT 2
  68. #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
  69. // RefcountMap disguises its pointers because we
  70. // don't want the table to act as a root for `leaks`.
  71. typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
  72. // Template parameters.
  73. enum HaveOld { DontHaveOld = false, DoHaveOld = true };
  74. enum HaveNew { DontHaveNew = false, DoHaveNew = true };
// One stripe of global refcount/weak-reference state, guarded by `slock`.
// Objects are hashed to a SideTable by address (see StripedMap).
  75. struct SideTable {
  76. spinlock_t slock;
  77. RefcountMap refcnts;
  78. weak_table_t weak_table;
  79. SideTable() {
  80. memset(&weak_table, 0, sizeof(weak_table));
  81. }
// SideTables live forever inside SideTableBuf; destruction is a bug.
  82. ~SideTable() {
  83. _objc_fatal("Do not delete SideTable.");
  84. }
  85. void lock() { slock.lock(); }
  86. void unlock() { slock.unlock(); }
  87. void forceReset() { slock.forceReset(); }
  88. // Address-ordered lock discipline for a pair of side tables.
  89. template<HaveOld, HaveNew>
  90. static void lockTwo(SideTable *lock1, SideTable *lock2);
  91. template<HaveOld, HaveNew>
  92. static void unlockTwo(SideTable *lock1, SideTable *lock2);
  93. };
// Both tables present: defer to spinlock_t::lockTwo, which orders the
// two acquisitions to avoid deadlock when both stripes are needed.
  94. template<>
  95. void SideTable::lockTwo<DoHaveOld, DoHaveNew>
  96. (SideTable *lock1, SideTable *lock2)
  97. {
  98. spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
  99. }
  100. template<>
  101. void SideTable::lockTwo<DoHaveOld, DontHaveNew>
  102. (SideTable *lock1, SideTable *)
  103. {
  104. lock1->lock();
  105. }
  106. template<>
  107. void SideTable::lockTwo<DontHaveOld, DoHaveNew>
  108. (SideTable *, SideTable *lock2)
  109. {
  110. lock2->lock();
  111. }
  112. template<>
  113. void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
  114. (SideTable *lock1, SideTable *lock2)
  115. {
  116. spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
  117. }
  118. template<>
  119. void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
  120. (SideTable *lock1, SideTable *)
  121. {
  122. lock1->unlock();
  123. }
  124. template<>
  125. void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
  126. (SideTable *, SideTable *lock2)
  127. {
  128. lock2->unlock();
  129. }
  130. // We cannot use a C++ static initializer to initialize SideTables because
  131. // libc calls us before our C++ initializers run. We also don't want a global
  132. // pointer to this struct because of the extra indirection.
  133. // Do it the hard way.
  134. alignas(StripedMap<SideTable>) static uint8_t
  135. SideTableBuf[sizeof(StripedMap<SideTable>)];
// Placement-new the StripedMap into the raw static buffer above.
  136. static void SideTableInit() {
  137. new (SideTableBuf) StripedMap<SideTable>();
  138. }
// Accessor for the striped side-table map; valid only after SideTableInit().
  139. static StripedMap<SideTable>& SideTables() {
  140. return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
  141. }
  142. // anonymous namespace
  143. };
// Whole-map lock management, used around fork() and debugging machinery.
// Each wrapper simply forwards to the StripedMap of side tables.
  144. void SideTableLockAll() {
  145. SideTables().lockAll();
  146. }
  147. void SideTableUnlockAll() {
  148. SideTables().unlockAll();
  149. }
  150. void SideTableForceResetAll() {
  151. SideTables().forceResetAll();
  152. }
// Lock-order registration: establish and extend the debug lock ordering
// that the side-table locks participate in.
  153. void SideTableDefineLockOrder() {
  154. SideTables().defineLockOrder();
  155. }
  156. void SideTableLocksPrecedeLock(const void *newlock) {
  157. SideTables().precedeLock(newlock);
  158. }
  159. void SideTableLocksSucceedLock(const void *oldlock) {
  160. SideTables().succeedLock(oldlock);
  161. }
  162. void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
  163. int i = 0;
  164. const void *newlock;
  165. while ((newlock = newlocks.getLock(i++))) {
  166. SideTables().precedeLock(newlock);
  167. }
  168. }
  169. void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
  170. int i = 0;
  171. const void *oldlock;
  172. while ((oldlock = oldlocks.getLock(i++))) {
  173. SideTables().succeedLock(oldlock);
  174. }
  175. }
  176. //
  177. // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
  178. //
// Retaining a block means copying it (moves stack blocks to the heap;
// bumps the refcount of heap blocks).
  179. id objc_retainBlock(id x) {
  180. return (id)_Block_copy(x);
  181. }
  182. //
  183. // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
  184. //
// Always approves deallocation; exists as a compiler hook point.
  185. BOOL objc_should_deallocate(id object) {
  186. return YES;
  187. }
// Combined retain+autorelease, emitted by ARC for returned values.
  188. id
  189. objc_retain_autorelease(id obj)
  190. {
  191. return objc_autorelease(objc_retain(obj));
  192. }
// ARC store to a __strong variable: retain the new value, store it,
// then release the previous value.
// The old value is released LAST so that any side effects of its
// deallocation (which can run arbitrary code) never observe *location
// still holding the dying object. Self-assignment is a no-op.
  193. void
  194. objc_storeStrong(id *location, id obj)
  195. {
  196. id prev = *location;
  197. if (obj == prev) {
  198. return;
  199. }
  200. objc_retain(obj);
  201. *location = obj;
  202. objc_release(prev);
  203. }
  204. // Update a weak variable.
  205. // If HaveOld is true, the variable has an existing value
  206. // that needs to be cleaned up. This value might be nil.
  207. // If HaveNew is true, there is a new value that needs to be
  208. // assigned into the variable. This value might be nil.
  209. // If CrashIfDeallocating is true, the process is halted if newObj is
  210. // deallocating or newObj's class does not support weak references.
  211. // If CrashIfDeallocating is false, nil is stored instead.
  212. enum CrashIfDeallocating {
  213. DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
  214. };
// Core weak-store primitive shared by objc_storeWeak / objc_initWeak /
// objc_destroyWeak. Template parameters are compile-time constants, so
// each public entry point compiles down to only the branches it needs.
// Returns the value actually stored (newObj, or nil if the store was
// refused by weak_register_no_lock).
  215. template <HaveOld haveOld, HaveNew haveNew,
  216. CrashIfDeallocating crashIfDeallocating>
  217. static id
  218. storeWeak(id *location, objc_object *newObj)
  219. {
  220. assert(haveOld || haveNew);
  221. if (!haveNew) assert(newObj == nil);
  222. Class previouslyInitializedClass = nil;
  223. id oldObj;
  224. SideTable *oldTable;
  225. SideTable *newTable;
  226. // Acquire locks for old and new values.
  227. // Order by lock address to prevent lock ordering problems.
  228. // Retry if the old value changes underneath us.
  229. retry:
  230. if (haveOld) {
  231. oldObj = *location;
  232. oldTable = &SideTables()[oldObj];
  233. } else {
  234. oldTable = nil;
  235. }
  236. if (haveNew) {
  237. newTable = &SideTables()[newObj];
  238. } else {
  239. newTable = nil;
  240. }
  241. SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);
// *location was read before the lock was taken; if another thread changed
// it in the window, the stripe we locked may be wrong — start over.
  242. if (haveOld && *location != oldObj) {
  243. SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
  244. goto retry;
  245. }
  246. // Prevent a deadlock between the weak reference machinery
  247. // and the +initialize machinery by ensuring that no
  248. // weakly-referenced object has an un-+initialized isa.
  249. if (haveNew && newObj) {
  250. Class cls = newObj->getIsa();
  251. if (cls != previouslyInitializedClass &&
  252. !((objc_class *)cls)->isInitialized())
  253. {
// Must drop the side-table locks before running +initialize, which can
// run arbitrary user code.
  254. SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
  255. _class_initialize(_class_getNonMetaClass(cls, (id)newObj));
  256. // If this class is finished with +initialize then we're good.
  257. // If this class is still running +initialize on this thread
  258. // (i.e. +initialize called storeWeak on an instance of itself)
  259. // then we may proceed but it will appear initializing and
  260. // not yet initialized to the check above.
  261. // Instead set previouslyInitializedClass to recognize it on retry.
  262. previouslyInitializedClass = cls;
  263. goto retry;
  264. }
  265. }
  266. // Clean up old value, if any.
  267. if (haveOld) {
  268. weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
  269. }
  270. // Assign new value, if any.
  271. if (haveNew) {
  272. newObj = (objc_object *)
  273. weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
  274. crashIfDeallocating);
  275. // weak_register_no_lock returns nil if weak store should be rejected
  276. // Set is-weakly-referenced bit in refcount table.
  277. if (newObj && !newObj->isTaggedPointer()) {
  278. newObj->setWeaklyReferenced_nolock();
  279. }
  280. // Do not set *location anywhere else. That would introduce a race.
  281. *location = (id)newObj;
  282. }
  283. else {
  284. // No new value. The storage is not changed.
  285. }
  286. SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
  287. return (id)newObj;
  288. }
  289. /**
  290. * This function stores a new value into a __weak variable. It would
  291. * be used anywhere a __weak variable is the target of an assignment.
  292. *
  293. * @param location The address of the weak pointer itself
  294. * @param newObj The new object this weak ptr should now point to
  295. *
  296. * @return \e newObj
  297. */
  298. id
  299. objc_storeWeak(id *location, id newObj)
  300. {
  301. return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
  302. (location, (objc_object *)newObj);
  303. }
  304. /**
  305. * This function stores a new value into a __weak variable.
  306. * If the new object is deallocating or the new object's class
  307. * does not support weak references, stores nil instead.
  308. *
  309. * @param location The address of the weak pointer itself
  310. * @param newObj The new object this weak ptr should now point to
  311. *
  312. * @return The value stored (either the new object or nil)
  313. */
  314. id
  315. objc_storeWeakOrNil(id *location, id newObj)
  316. {
  317. return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
  318. (location, (objc_object *)newObj);
  319. }
  320. /**
  321. * Initialize a fresh weak pointer to some object location.
  322. * It would be used for code like:
  323. *
  324. * (The nil case)
  325. * __weak id weakPtr;
  326. * (The non-nil case)
  327. * NSObject *o = ...;
  328. * __weak id weakPtr = o;
  329. *
  330. * This function IS NOT thread-safe with respect to concurrent
  331. * modifications to the weak variable. (Concurrent weak clear is safe.)
  332. *
  333. * @param location Address of __weak ptr.
  334. * @param newObj Object ptr.
  335. */
  336. id
  337. objc_initWeak(id *location, id newObj)
  338. {
// Fresh storage holds no registered weak reference, so nil can be
// written directly without consulting the weak table.
  339. if (!newObj) {
  340. *location = nil;
  341. return nil;
  342. }
  343. return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
  344. (location, (objc_object*)newObj);
  345. }
// Same as objc_initWeak, but stores nil instead of crashing when the
// target is deallocating or its class forbids weak references.
  346. id
  347. objc_initWeakOrNil(id *location, id newObj)
  348. {
  349. if (!newObj) {
  350. *location = nil;
  351. return nil;
  352. }
  353. return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
  354. (location, (objc_object*)newObj);
  355. }
  356. /**
  357. * Destroys the relationship between a weak pointer
  358. * and the object it is referencing in the internal weak
  359. * table. If the weak pointer is not referencing anything,
  360. * there is no need to edit the weak table.
  361. *
  362. * This function IS NOT thread-safe with respect to concurrent
  363. * modifications to the weak variable. (Concurrent weak clear is safe.)
  364. *
  365. * @param location The weak pointer address.
  366. */
  367. void
  368. objc_destroyWeak(id *location)
  369. {
// Unregister only; *location itself is deliberately left untouched
// (see the history note that follows in this file).
  370. (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
  371. (location, nil);
  372. }
  373. /*
  374. Once upon a time we eagerly cleared *location if we saw the object
  375. was deallocating. This confuses code like NSPointerFunctions which
  376. tries to pre-flight the raw storage and assumes if the storage is
  377. zero then the weak system is done interfering. That is false: the
  378. weak system is still going to check and clear the storage later.
  379. This can cause objc_weak_error complaints and crashes.
  380. So we now don't touch the storage until deallocation completes.
  381. */
// Load the object from a __weak variable and return it retained, or nil
// if the referent is gone or refuses the retain (rootTryRetain /
// -retainWeakReference failure). Caller owns the returned +1.
  382. id
  383. objc_loadWeakRetained(id *location)
  384. {
  385. id obj;
  386. id result;
  387. Class cls;
  388. SideTable *table;
  389. retry:
  390. // fixme std::atomic this load
  391. obj = *location;
  392. if (!obj) return nil;
// Tagged pointers are never deallocated, so no retain bookkeeping needed.
  393. if (obj->isTaggedPointer()) return obj;
  394. table = &SideTables()[obj];
  395. table->lock();
// Re-check under the lock; retry if another thread changed the variable.
  396. if (*location != obj) {
  397. table->unlock();
  398. goto retry;
  399. }
  400. result = obj;
  401. cls = obj->ISA();
  402. if (! cls->hasCustomRR()) {
  403. // Fast case. We know +initialize is complete because
  404. // default-RR can never be set before then.
  405. assert(cls->isInitialized());
  406. if (! obj->rootTryRetain()) {
  407. result = nil;
  408. }
  409. }
  410. else {
  411. // Slow case. We must check for +initialize and call it outside
  412. // the lock if necessary in order to avoid deadlocks.
  413. if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
  414. BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
  415. class_getMethodImplementation(cls, SEL_retainWeakReference);
// A forwarded -retainWeakReference means the class doesn't implement it;
// treat the referent as unretainable.
  416. if ((IMP)tryRetain == _objc_msgForward) {
  417. result = nil;
  418. }
  419. else if (! (*tryRetain)(obj, SEL_retainWeakReference)) {
  420. result = nil;
  421. }
  422. }
  423. else {
// Run +initialize with the side-table lock dropped, then start over.
  424. table->unlock();
  425. _class_initialize(cls);
  426. goto retry;
  427. }
  428. }
  429. table->unlock();
  430. return result;
  431. }
  432. /**
  433. * This loads the object referenced by a weak pointer and returns it, after
  434. * retaining and autoreleasing the object to ensure that it stays alive
  435. * long enough for the caller to use it. This function would be used
  436. * anywhere a __weak variable is used in an expression.
  437. *
  438. * @param location The weak pointer address
  439. *
  440. * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
  441. */
  442. id
  443. objc_loadWeak(id *location)
  444. {
  445. if (!*location) return nil;
// The +1 from objc_loadWeakRetained is handed to the autorelease pool.
  446. return objc_autorelease(objc_loadWeakRetained(location));
  447. }
  448. /**
  449. * This function copies a weak pointer from one location to another,
  450. * when the destination doesn't already contain a weak pointer. It
  451. * would be used for code like:
  452. *
  453. * __weak id src = ...;
  454. * __weak id dst = src;
  455. *
  456. * This function IS NOT thread-safe with respect to concurrent
  457. * modifications to the destination variable. (Concurrent weak clear is safe.)
  458. *
  459. * @param dst The destination variable.
  460. * @param src The source variable.
  461. */
  462. void
  463. objc_copyWeak(id *dst, id *src)
  464. {
// Retain the referent so it cannot die between the load and the init.
  465. id obj = objc_loadWeakRetained(src);
  466. objc_initWeak(dst, obj);
  467. objc_release(obj);
  468. }
  469. /**
  470. * Move a weak pointer from one location to another.
  471. * Before the move, the destination must be uninitialized.
  472. * After the move, the source is nil.
  473. *
  474. * This function IS NOT thread-safe with respect to concurrent
  475. * modifications to either weak variable. (Concurrent weak clear is safe.)
  476. *
  477. */
  478. void
  479. objc_moveWeak(id *dst, id *src)
  480. {
// Copy, then tear down the source registration and zero its storage.
  481. objc_copyWeak(dst, src);
  482. objc_destroyWeak(src);
  483. *src = nil;
  484. }
  485. /***********************************************************************
  486. Autorelease pool implementation
  487. A thread's autorelease pool is a stack of pointers.
  488. Each pointer is either an object to release, or POOL_BOUNDARY which is
  489. an autorelease pool boundary.
  490. A pool token is a pointer to the POOL_BOUNDARY for that pool. When
  491. the pool is popped, every object hotter than the sentinel is released.
  492. The stack is divided into a doubly-linked list of pages. Pages are added
  493. and deleted as necessary.
  494. Thread-local storage points to the hot page, where newly autoreleased
  495. objects are stored.
  496. **********************************************************************/
  497. // Set this to 1 to mprotect() autorelease pool contents
  498. #define PROTECT_AUTORELEASEPOOL 0
  499. // Set this to 1 to validate the entire autorelease pool header all the time
  500. // (i.e. use check() instead of fastcheck() everywhere)
  501. #define CHECK_AUTORELEASEPOOL (DEBUG)
  502. BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
  503. BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
  504. namespace {
// Guard value placed at the head of every AutoreleasePoolPage to detect
// memory corruption: one fixed word (M0) followed by the 12-byte string
// "AUTORELEASE!" spread across m[1..3].
  505. struct magic_t {
  506. static const uint32_t M0 = 0xA1A1A1A1;
  507. # define M1 "AUTORELEASE!"
  508. static const size_t M1_len = 12;
  509. uint32_t m[4];
  510. magic_t() {
  511. assert(M1_len == strlen(M1));
  512. assert(M1_len == 3 * sizeof(m[1]));
  513. m[0] = M0;
  514. strncpy((char *)&m[1], M1, M1_len);
  515. }
// Zero on destruction so stale pages fail the check.
  516. ~magic_t() {
  517. m[0] = m[1] = m[2] = m[3] = 0;
  518. }
// Full check: both the fixed word and the string must match.
  519. bool check() const {
  520. return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
  521. }
// Cheap check used on hot paths; falls back to full check in debug builds.
  522. bool fastcheck() const {
  523. #if CHECK_AUTORELEASEPOOL
  524. return check();
  525. #else
  526. return (m[0] == M0);
  527. #endif
  528. }
  529. # undef M1
  530. };
// One page of the per-thread autorelease stack. Pages form a doubly-linked
// list (parent/child); the thread's TLS slot points at the "hot" page where
// new autoreleased objects are appended.
  531. class AutoreleasePoolPage
  532. {
  533. // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
  534. // pushed and it has never contained any objects. This saves memory
  535. // when the top level (i.e. libdispatch) pushes and pops pools but
  536. // never uses them.
  537. # define EMPTY_POOL_PLACEHOLDER ((id*)1)
  538. # define POOL_BOUNDARY nil
  539. static pthread_key_t const key = AUTORELEASE_POOL_KEY;
  540. static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
  541. static size_t const SIZE =
  542. #if PROTECT_AUTORELEASEPOOL
  543. PAGE_MAX_SIZE; // must be multiple of vm page size
  544. #else
  545. PAGE_MAX_SIZE; // size and alignment, power of 2
  546. #endif
// Total id slots per page, including the header's footprint.
  547. static size_t const COUNT = SIZE / sizeof(id);
  548. magic_t const magic;
// First unused slot in this page's object array.
  549. id *next;
// Owning thread; pages are thread-confined (checked in check()).
  550. pthread_t const thread;
  551. AutoreleasePoolPage * const parent;
  552. AutoreleasePoolPage *child;
// Distance from the coldest page (0 for the first page).
  553. uint32_t const depth;
// High-water mark of objects held, inherited from the parent page.
  554. uint32_t hiwat;
  555. // SIZE-sizeof(*this) bytes of contents follow
// Pages are SIZE-aligned so pageForPointer() can recover a page from any
// interior pointer by masking.
  556. static void * operator new(size_t size) {
  557. return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
  558. }
  559. static void operator delete(void * p) {
  560. return free(p);
  561. }
// With PROTECT_AUTORELEASEPOOL, pages are kept read-only except while
// being mutated; these are no-ops otherwise.
  562. inline void protect() {
  563. #if PROTECT_AUTORELEASEPOOL
  564. mprotect(this, SIZE, PROT_READ);
  565. check();
  566. #endif
  567. }
  568. inline void unprotect() {
  569. #if PROTECT_AUTORELEASEPOOL
  570. check();
  571. mprotect(this, SIZE, PROT_READ | PROT_WRITE);
  572. #endif
  573. }
// Construct a fresh page and link it as the child of `newParent` (if any).
  574. AutoreleasePoolPage(AutoreleasePoolPage *newParent)
  575. : magic(), next(begin()), thread(pthread_self()),
  576. parent(newParent), child(nil),
  577. depth(parent ? 1+parent->depth : 0),
  578. hiwat(parent ? parent->hiwat : 0)
  579. {
  580. if (parent) {
  581. parent->check();
  582. assert(!parent->child);
  583. parent->unprotect();
  584. parent->child = this;
  585. parent->protect();
  586. }
  587. protect();
  588. }
// A page may only be destroyed when empty and childless (see kill()).
  589. ~AutoreleasePoolPage()
  590. {
  591. check();
  592. unprotect();
  593. assert(empty());
  594. // Not recursive: we don't want to blow out the stack
  595. // if a thread accumulates a stupendous amount of garbage
  596. assert(!child);
  597. }
// Report a corrupted page header; fatal by default, warn-only if !die.
  598. void busted(bool die = true)
  599. {
  600. magic_t right;
  601. (die ? _objc_fatal : _objc_inform)
  602. ("autorelease pool page %p corrupted\n"
  603. " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
  604. " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
  605. " pthread %p\n"
  606. " should be %p\n",
  607. this,
  608. magic.m[0], magic.m[1], magic.m[2], magic.m[3],
  609. right.m[0], right.m[1], right.m[2], right.m[3],
  610. this->thread, pthread_self());
  611. }
// Validate the magic header and thread confinement.
  612. void check(bool die = true)
  613. {
  614. if (!magic.check() || !pthread_equal(thread, pthread_self())) {
  615. busted(die);
  616. }
  617. }
// Cheap validation for hot paths; full check in debug builds.
  618. void fastcheck(bool die = true)
  619. {
  620. #if CHECK_AUTORELEASEPOOL
  621. check(die);
  622. #else
  623. if (! magic.fastcheck()) {
  624. busted(die);
  625. }
  626. #endif
  627. }
  628. id * begin() {
  629. return (id *) ((uint8_t *)this+sizeof(*this));
  630. }
  631. id * end() {
  632. return (id *) ((uint8_t *)this+SIZE);
  633. }
  634. bool empty() {
  635. return next == begin();
  636. }
  637. bool full() {
  638. return next == end();
  639. }
  640. bool lessThanHalfFull() {
  641. return (next - begin() < (end() - begin()) / 2);
  642. }
  643. id *add(id obj)
  644. {
  645. assert(!full());
  646. unprotect();
  647. id *ret = next; // faster than `return next-1` because of aliasing
  648. *next++ = obj;
  649. protect();
  650. return ret;
  651. }
    // Release every object on this page (and any hotter pages).
    void releaseAll()
    {
        releaseUntil(begin());
    }

    // Pop and release objects from the hot end of the pool down to `stop`.
    // POOL_BOUNDARY sentinels are skipped, not released.
    void releaseUntil(id *stop)
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        while (this->next != stop) {
            // Restart from hotPage() every time, in case -release
            // autoreleased more objects
            AutoreleasePoolPage *page = hotPage();

            // fixme I think this `while` can be `if`, but I can't prove it
            while (page->empty()) {
                page = page->parent;
                setHotPage(page);
            }

            page->unprotect();
            id obj = *--page->next;
            // Scribble over the vacated slot to catch use-after-pop.
            memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
            page->protect();

            if (obj != POOL_BOUNDARY) {
                objc_release(obj);
            }
        }

        setHotPage(this);

#if DEBUG
        // we expect any children to be completely empty
        for (AutoreleasePoolPage *page = child; page; page = page->child) {
            assert(page->empty());
        }
#endif
    }
    // Delete this page and every child page below it, walking from the
    // deepest child back up to (and including) this page. Each parent's
    // child link is cleared before the child is deleted.
    void kill()
    {
        // Not recursive: we don't want to blow out the stack
        // if a thread accumulates a stupendous amount of garbage
        AutoreleasePoolPage *page = this;
        while (page->child) page = page->child;

        AutoreleasePoolPage *deathptr;
        do {
            deathptr = page;
            page = page->parent;
            if (page) {
                page->unprotect();
                page->child = nil;
                page->protect();
            }
            delete deathptr;
        } while (deathptr != this);
    }
    // Thread-local-storage destructor, run at thread exit. Pops any
    // remaining pools on the dying thread and frees the page chain.
    // `p` is the stored TLS value (a page pointer or the placeholder).
    static void tls_dealloc(void *p)
    {
        if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
            // No objects or pool pages to clean up here.
            return;
        }

        // reinstate TLS value while we work
        setHotPage((AutoreleasePoolPage *)p);

        if (AutoreleasePoolPage *page = coldPage()) {
            if (!page->empty()) pop(page->begin());  // pop all of the pools
            if (DebugMissingPools || DebugPoolAllocation) {
                // pop() killed the pages already
            } else {
                page->kill();  // free all of the pages
            }
        }

        // clear TLS value so TLS destruction doesn't loop
        setHotPage(nil);
    }
    // Map an interior pointer (e.g. a pop token) back to its owning page.
    static AutoreleasePoolPage *pageForPointer(const void *p)
    {
        return pageForPointer((uintptr_t)p);
    }

    // Pages are SIZE-aligned, so masking off the low bits of any pointer
    // into the page yields the page's base address. The offset must land
    // past the page header (interior pointers always point at slots).
    static AutoreleasePoolPage *pageForPointer(uintptr_t p)
    {
        AutoreleasePoolPage *result;
        uintptr_t offset = p % SIZE;

        assert(offset >= sizeof(AutoreleasePoolPage));

        result = (AutoreleasePoolPage *)(p - offset);
        result->fastcheck();

        return result;
    }
    // True if this thread's TLS slot holds the empty-pool placeholder
    // (a pool was pushed but nothing has been autoreleased into it yet).
    static inline bool haveEmptyPoolPlaceholder()
    {
        id *tls = (id *)tls_get_direct(key);
        return (tls == EMPTY_POOL_PLACEHOLDER);
    }

    // Install the empty-pool placeholder in TLS. The slot must be empty.
    static inline id* setEmptyPoolPlaceholder()
    {
        assert(tls_get_direct(key) == nil);
        tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
        return EMPTY_POOL_PLACEHOLDER;
    }

    // The page currently accepting pushes for this thread, or nil if there
    // is no page (including when only the placeholder is installed).
    static inline AutoreleasePoolPage *hotPage()
    {
        AutoreleasePoolPage *result = (AutoreleasePoolPage *)
            tls_get_direct(key);
        if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
        if (result) result->fastcheck();
        return result;
    }

    // Record `page` as the thread's hot page (may be nil to clear).
    static inline void setHotPage(AutoreleasePoolPage *page)
    {
        if (page) page->fastcheck();
        tls_set_direct(key, (void *)page);
    }

    // The oldest page in this thread's chain (root of the parent links),
    // or nil if there is no hot page.
    static inline AutoreleasePoolPage *coldPage()
    {
        AutoreleasePoolPage *result = hotPage();
        if (result) {
            while (result->parent) {
                result = result->parent;
                result->fastcheck();
            }
        }
        return result;
    }
    // Fast path for autorelease: append to the hot page if it has room,
    // otherwise fall to the full-page or no-page slow paths.
    static inline id *autoreleaseFast(id obj)
    {
        AutoreleasePoolPage *page = hotPage();
        if (page && !page->full()) {
            return page->add(obj);
        } else if (page) {
            return autoreleaseFullPage(obj, page);
        } else {
            return autoreleaseNoPage(obj);
        }
    }
    // Slow path: the hot page is full. Walk (or grow) the child chain to
    // the next non-full page, make it hot, and push `obj` there.
    static __attribute__((noinline))
    id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
    {
        // The hot page is full.
        // Step to the next non-full page, adding a new page if necessary.
        // Then add the object to that page.
        assert(page == hotPage());
        // DebugPoolAllocation routes here via autoreleaseNewPage even when
        // the page isn't full, hence the relaxed assertion.
        assert(page->full() || DebugPoolAllocation);

        do {
            if (page->child) page = page->child;
            else page = new AutoreleasePoolPage(page);
        } while (page->full());

        setHotPage(page);
        return page->add(obj);
    }
    // Slow path: there is no hot page for this thread. Either no pool has
    // been pushed, or only the empty placeholder is installed. Creates the
    // first real page (and a boundary for the placeholder'd pool, if any).
    static __attribute__((noinline))
    id *autoreleaseNoPage(id obj)
    {
        // "No page" could mean no pool has been pushed
        // or an empty placeholder pool has been pushed and has no contents yet
        assert(!hotPage());

        bool pushExtraBoundary = false;
        if (haveEmptyPoolPlaceholder()) {
            // We are pushing a second pool over the empty placeholder pool
            // or pushing the first object into the empty placeholder pool.
            // Before doing that, push a pool boundary on behalf of the pool
            // that is currently represented by the empty placeholder.
            pushExtraBoundary = true;
        }
        else if (obj != POOL_BOUNDARY && DebugMissingPools) {
            // We are pushing an object with no pool in place,
            // and no-pool debugging was requested by environment.
            _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
                         "autoreleased with no pool in place - "
                         "just leaking - break on "
                         "objc_autoreleaseNoPool() to debug",
                         pthread_self(), (void*)obj, object_getClassName(obj));
            objc_autoreleaseNoPool(obj);
            return nil;
        }
        else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
            // We are pushing a pool with no pool in place,
            // and alloc-per-pool debugging was not requested.
            // Install and return the empty pool placeholder.
            return setEmptyPoolPlaceholder();
        }

        // We are pushing an object or a non-placeholder'd pool.

        // Install the first page.
        AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
        setHotPage(page);

        // Push a boundary on behalf of the previously-placeholder'd pool.
        if (pushExtraBoundary) {
            page->add(POOL_BOUNDARY);
        }

        // Push the requested object or pool.
        return page->add(obj);
    }
  838. static __attribute__((noinline))
  839. id *autoreleaseNewPage(id obj)
  840. {
  841. AutoreleasePoolPage *page = hotPage();
  842. if (page) return autoreleaseFullPage(obj, page);
  843. else return autoreleaseNoPage(obj);
  844. }
public:
    // Public entry: add `obj` to the current thread's autorelease pool.
    // Tagged pointers are handled by the caller and never reach here.
    // Returns `obj` unchanged.
    static inline id autorelease(id obj)
    {
        assert(obj);
        assert(!obj->isTaggedPointer());
        id *dest __unused = autoreleaseFast(obj);
        assert(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
        return obj;
    }
    // Push a new pool. Returns an opaque token (the boundary's slot, or
    // the placeholder) that must later be passed to pop().
    static inline void *push()
    {
        id *dest;
        if (DebugPoolAllocation) {
            // Each autorelease pool starts on a new pool page.
            dest = autoreleaseNewPage(POOL_BOUNDARY);
        } else {
            dest = autoreleaseFast(POOL_BOUNDARY);
        }
        assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
        return dest;
    }
    // Report a pop() with an invalid or already-freed token.
    // Fatal under OBJC_DEBUG_POOL_ALLOCATION or when built with a new SDK;
    // otherwise warned once (per process) for binary compatibility.
    static void badPop(void *token)
    {
        // Error. For bincompat purposes this is not
        // fatal in executables built with old SDKs.

        if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
            // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
            _objc_fatal
                ("Invalid or prematurely-freed autorelease pool %p.", token);
        }

        // Old SDK. Bad pop is warned once.
        static bool complained = false;
        if (!complained) {
            complained = true;
            _objc_inform_now_and_on_crash
                ("Invalid or prematurely-freed autorelease pool %p. "
                 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
                 "Proceeding anyway because the app is old "
                 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
                 token, FORMAT_SDK(sdkVersion()));
        }
        objc_autoreleasePoolInvalid(token);
    }
    // Pop the pool identified by `token` (from push()): release everything
    // pushed since, then trim now-empty child pages with some hysteresis.
    static inline void pop(void *token)
    {
        AutoreleasePoolPage *page;
        id *stop;

        if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
            // Popping the top-level placeholder pool.
            if (hotPage()) {
                // Pool was used. Pop its contents normally.
                // Pool pages remain allocated for re-use as usual.
                pop(coldPage()->begin());
            } else {
                // Pool was never used. Clear the placeholder.
                setHotPage(nil);
            }
            return;
        }

        page = pageForPointer(token);
        stop = (id *)token;
        if (*stop != POOL_BOUNDARY) {
            if (stop == page->begin() && !page->parent) {
                // Start of coldest page may correctly not be POOL_BOUNDARY:
                // 1. top-level pool is popped, leaving the cold page in place
                // 2. an object is autoreleased with no pool
            } else {
                // Error. For bincompat purposes this is not
                // fatal in executables built with old SDKs.
                return badPop(token);
            }
        }

        if (PrintPoolHiwat) printHiwat();

        page->releaseUntil(stop);

        // memory: delete empty children
        if (DebugPoolAllocation && page->empty()) {
            // special case: delete everything during page-per-pool debugging
            AutoreleasePoolPage *parent = page->parent;
            page->kill();
            setHotPage(parent);
        } else if (DebugMissingPools && page->empty() && !page->parent) {
            // special case: delete everything for pop(top)
            // when debugging missing autorelease pools
            page->kill();
            setHotPage(nil);
        }
        else if (page->child) {
            // hysteresis: keep one empty child if page is more than half full
            if (page->lessThanHalfFull()) {
                page->child->kill();
            }
            else if (page->child->child) {
                page->child->child->kill();
            }
        }
    }
    // One-time setup: register tls_dealloc as the destructor for the
    // pool's per-thread TLS key so pools are drained at thread exit.
    static void init()
    {
        int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
                                             AutoreleasePoolPage::tls_dealloc);
        assert(r == 0);
    }
    // Debug dump of this page: header line with full/hot/cold status,
    // then one line per occupied slot (boundaries marked distinctly).
    void print()
    {
        _objc_inform("[%p] ................ PAGE %s %s %s", this,
                     full() ? "(full)" : "",
                     this == hotPage() ? "(hot)" : "",
                     this == coldPage() ? "(cold)" : "");
        check(false);
        for (id *p = begin(); p < next; p++) {
            if (*p == POOL_BOUNDARY) {
                _objc_inform("[%p] ################ POOL %p", p, p);
            } else {
                _objc_inform("[%p] %#16lx %s",
                             p, (unsigned long)*p, object_getClassName(*p));
            }
        }
    }
    // Debug dump of the current thread's entire pool chain: total pending
    // release count, then each page cold-to-hot (or the placeholder).
    static void printAll()
    {
        _objc_inform("##############");
        _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());

        AutoreleasePoolPage *page;
        ptrdiff_t objects = 0;
        for (page = coldPage(); page; page = page->child) {
            objects += page->next - page->begin();
        }
        _objc_inform("%llu releases pending.", (unsigned long long)objects);

        if (haveEmptyPoolPlaceholder()) {
            _objc_inform("[%p] ................ PAGE (placeholder)",
                         EMPTY_POOL_PLACEHOLDER);
            _objc_inform("[%p] ################ POOL (placeholder)",
                         EMPTY_POOL_PLACEHOLDER);
        }
        else {
            for (page = coldPage(); page; page = page->child) {
                page->print();
            }
        }

        _objc_inform("##############");
    }
  986. static void printHiwat()
  987. {
  988. // Check and propagate high water mark
  989. // Ignore high water marks under 256 to suppress noise.
  990. AutoreleasePoolPage *p = hotPage();
  991. uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
  992. if (mark > p->hiwat && mark > 256) {
  993. for( ; p; p = p->parent) {
  994. p->unprotect();
  995. p->hiwat = mark;
  996. p->protect();
  997. }
  998. _objc_inform("POOL HIGHWATER: new high water mark of %u "
  999. "pending releases for thread %p:",
  1000. mark, pthread_self());
  1001. void *stack[128];
  1002. int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
  1003. char **sym = backtrace_symbols(stack, count);
  1004. for (int i = 0; i < count; i++) {
  1005. _objc_inform("POOL HIGHWATER: %s", sym[i]);
  1006. }
  1007. free(sym);
  1008. }
  1009. }
  1010. #undef POOL_BOUNDARY
  1011. };
  1012. // anonymous namespace
  1013. };
  1014. /***********************************************************************
  1015. * Slow paths for inline control
  1016. **********************************************************************/
  1017. #if SUPPORT_NONPOINTER_ISA
// Out-of-line slow path: the inline retain count in the isa overflowed;
// re-run rootRetain with handleOverflow = true to spill to the side table.
NEVER_INLINE id
objc_object::rootRetain_overflow(bool tryRetain)
{
    return rootRetain(tryRetain, true);
}

// Out-of-line slow path: the inline retain count in the isa underflowed;
// re-run rootRelease with handleUnderflow = true to borrow from the
// side table (or begin deallocation).
NEVER_INLINE bool
objc_object::rootRelease_underflow(bool performDealloc)
{
    return rootRelease(performDealloc, true);
}
// Slow path of clearDeallocating()
// for objects with nonpointer isa
// that were ever weakly referenced
// or whose retain count ever overflowed to the side table.
// Clears the object's weak references and side-table retain count entry
// under the side table's lock.
NEVER_INLINE void
objc_object::clearDeallocating_slow()
{
    assert(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));

    SideTable& table = SideTables()[this];
    table.lock();
    if (isa.weakly_referenced) {
        // Zero out all weak references to this dying object.
        weak_clear_no_lock(&table.weak_table, (id)this);
    }
    if (isa.has_sidetable_rc) {
        table.refcnts.erase(this);
    }
    table.unlock();
}
  1046. #endif
// Out-of-line tail of rootAutorelease(): hands the object to the
// autorelease pool machinery. Tagged pointers never reach here.
__attribute__((noinline,used))
id
objc_object::rootAutorelease2()
{
    assert(!isTaggedPointer());
    return AutoreleasePoolPage::autorelease((id)this);
}
// Empty function that exists solely as a debugger breakpoint target for
// over-release-during-dealloc errors.
BREAKPOINT_FUNCTION(
    void objc_overrelease_during_dealloc_error(void)
);

// Report a release sent to an object that is already deallocating.
// Returns false so rootRelease() can tail-call this and report
// "no dealloc needed".
NEVER_INLINE
bool
objc_object::overrelease_error()
{
    _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
    objc_overrelease_during_dealloc_error();
    return false;  // allow rootRelease() to tail-call this
}
  1065. /***********************************************************************
  1066. * Retain count operations for side table.
  1067. **********************************************************************/
  1068. #if DEBUG
// Used to assert that an object is not present in the side table.
// True if this object has a refcount entry or a registered weak
// reference in its side table. DEBUG builds only.
bool
objc_object::sidetable_present()
{
    bool result = false;
    SideTable& table = SideTables()[this];

    table.lock();

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) result = true;

    if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;

    table.unlock();

    return result;
}
  1082. #endif
  1083. #if SUPPORT_NONPOINTER_ISA
// Acquire this object's side table lock (for callers that need to do
// several side-table operations atomically).
void
objc_object::sidetable_lock()
{
    SideTable& table = SideTables()[this];
    table.lock();
}

// Release this object's side table lock.
void
objc_object::sidetable_unlock()
{
    SideTable& table = SideTables()[this];
    table.unlock();
}
// Move the entire retain count to the side table,
// as well as isDeallocating and weaklyReferenced.
// Called when an object's isa is being converted from nonpointer to raw
// pointer form; the caller holds the side table lock.
void
objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
                                          bool isDeallocating,
                                          bool weaklyReferenced)
{
    assert(!isa.nonpointer);  // should already be changed to raw pointer
    SideTable& table = SideTables()[this];

    size_t& refcntStorage = table.refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // not deallocating - that was in the isa
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    uintptr_t carry;
    // Shift the count past the flag bits; pin at maximum on overflow.
    size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) refcnt = SIDE_TABLE_RC_PINNED;
    if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
    if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;

    refcntStorage = refcnt;
}
// Move some retain counts to the side table from the isa field.
// Returns true if the object is now pinned.
// Caller holds the side table lock; the isa remains nonpointer.
bool
objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
{
    assert(isa.nonpointer);
    SideTable& table = SideTables()[this];

    size_t& refcntStorage = table.refcnts[this];
    size_t oldRefcnt = refcntStorage;
    // isa-side bits should not be set here
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    // Already pinned at maximum: count stays pinned forever.
    if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;

    uintptr_t carry;
    size_t newRefcnt =
        addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
    if (carry) {
        // Overflow: pin the count, preserving the flag bits.
        refcntStorage =
            SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
        return true;
    }
    else {
        refcntStorage = newRefcnt;
        return false;
    }
}
// Move some retain counts from the side table to the isa field.
// Returns the actual count subtracted, which may be less than the request.
// Caller holds the side table lock.
size_t
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
    assert(isa.nonpointer);
    SideTable& table = SideTables()[this];

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end() || it->second == 0) {
        // Side table retain count is zero. Can't borrow.
        return 0;
    }
    size_t oldRefcnt = it->second;

    // isa-side bits should not be set here
    assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
    assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);

    size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
    assert(oldRefcnt > newRefcnt);  // shouldn't underflow
    it->second = newRefcnt;
    return delta_rc;
}
// Return the side-table portion of this object's retain count
// (0 if there is no entry). Caller holds the side table lock.
size_t
objc_object::sidetable_getExtraRC_nolock()
{
    assert(isa.nonpointer);
    SideTable& table = SideTables()[this];
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) return 0;
    else return it->second >> SIDE_TABLE_RC_SHIFT;
}
  1173. // SUPPORT_NONPOINTER_ISA
  1174. #endif
// Retain for objects whose retain count lives entirely in the side table
// (raw-pointer isa). Increments unless the count is pinned at maximum.
// Returns the object.
id
objc_object::sidetable_retain()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    table.lock();
    size_t& refcntStorage = table.refcnts[this];
    if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
        refcntStorage += SIDE_TABLE_RC_ONE;
    }
    table.unlock();

    return (id)this;
}
// Try to retain a raw-isa object; fails (returns false) if the object is
// already deallocating. Deliberately takes NO lock — see comment below.
bool
objc_object::sidetable_tryRetain()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    // NO SPINLOCK HERE
    // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table.slock == 0) {
    //     _objc_fatal("Do not call -_tryRetain.");
    // }

    bool result = true;
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) {
        // No entry yet: first retain creates one.
        table.refcnts[this] = SIDE_TABLE_RC_ONE;
    } else if (it->second & SIDE_TABLE_DEALLOCATING) {
        result = false;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        it->second += SIDE_TABLE_RC_ONE;
    }

    return result;
}
// Side-table retain count plus the implicit +1 every live object carries.
uintptr_t
objc_object::sidetable_retainCount()
{
    SideTable& table = SideTables()[this];

    size_t refcnt_result = 1;

    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        // this is valid for SIDE_TABLE_RC_PINNED too
        refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
    }
    table.unlock();
    return refcnt_result;
}
// True if this raw-isa object's side-table entry has the deallocating
// bit set. Deliberately takes NO lock — see comment below.
bool
objc_object::sidetable_isDeallocating()
{
    SideTable& table = SideTables()[this];

    // NO SPINLOCK HERE
    // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
    // which already acquired the lock on our behalf.

    // fixme can't do this efficiently with os_lock_handoff_s
    // if (table.slock == 0) {
    //     _objc_fatal("Do not call -_isDeallocating.");
    // }

    RefcountMap::iterator it = table.refcnts.find(this);
    return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
}
// True if this object's side-table entry is marked weakly referenced.
bool
objc_object::sidetable_isWeaklyReferenced()
{
    bool result = false;

    SideTable& table = SideTables()[this];
    table.lock();

    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
    }

    table.unlock();

    return result;
}
// Mark this raw-isa object as weakly referenced in its side-table entry.
// Caller holds the side table lock.
void
objc_object::sidetable_setWeaklyReferenced_nolock()
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.nonpointer);
#endif

    SideTable& table = SideTables()[this];

    table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
// rdar://20206767
// return uintptr_t instead of bool so that the various raw-isa
// -release paths all return zero in eax
// Release for raw-isa objects: decrement the side-table count, and if it
// reaches zero, mark the object deallocating and (optionally) send
// -dealloc. Returns nonzero if dealloc was triggered.
uintptr_t
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    assert(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    bool do_dealloc = false;

    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it == table.refcnts.end()) {
        // No entry means implicit retain count of zero: deallocate.
        do_dealloc = true;
        table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
    } else if (it->second < SIDE_TABLE_DEALLOCATING) {
        // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
        do_dealloc = true;
        it->second |= SIDE_TABLE_DEALLOCATING;
    } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
        it->second -= SIDE_TABLE_RC_ONE;
    }
    table.unlock();
    if (do_dealloc && performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
    }
    return do_dealloc;
}
// Final side-table cleanup for a deallocating raw-isa object:
// zero its weak references (if any) and erase its refcount entry.
void
objc_object::sidetable_clearDeallocating()
{
    SideTable& table = SideTables()[this];

    // clear any weak table items
    // clear extra retain count and deallocating bit
    // (fixme warn or abort if extra retain count == 0 ?)
    table.lock();
    RefcountMap::iterator it = table.refcnts.find(this);
    if (it != table.refcnts.end()) {
        if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
            weak_clear_no_lock(&table.weak_table, (id)this);
        }
        table.refcnts.erase(it);
    }
    table.unlock();
}
  1311. /***********************************************************************
  1312. * Optimized retain/release/autorelease entrypoints
  1313. **********************************************************************/
  1314. #if __OBJC2__
// Optimized entry points. 16-byte alignment per the original source;
// nil and tagged pointers are no-ops handled before dispatch.
__attribute__((aligned(16)))
id
objc_retain(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->retain();
}

__attribute__((aligned(16)))
void
objc_release(id obj)
{
    if (!obj) return;
    if (obj->isTaggedPointer()) return;
    return obj->release();
}

__attribute__((aligned(16)))
id
objc_autorelease(id obj)
{
    if (!obj) return obj;
    if (obj->isTaggedPointer()) return obj;
    return obj->autorelease();
}
  1339. // OBJC2
  1340. #else
  1341. // not OBJC2
// Pre-OBJC2 runtime: plain message sends (nil-safe by messaging rules).
id objc_retain(id obj) { return [obj retain]; }
void objc_release(id obj) { [obj release]; }
id objc_autorelease(id obj) { return [obj autorelease]; }
  1345. #endif
  1346. /***********************************************************************
  1347. * Basic operations for root class implementations a.k.a. _objc_root*()
  1348. **********************************************************************/
// Thin wrappers exposing objc_object's root reference-counting
// operations as C entry points. All assert a non-nil object.

bool
_objc_rootTryRetain(id obj)
{
    assert(obj);

    return obj->rootTryRetain();
}

bool
_objc_rootIsDeallocating(id obj)
{
    assert(obj);

    return obj->rootIsDeallocating();
}

// Clear weak references and side-table state for a dying object.
// Tagged pointers have no such state and are ignored.
void
objc_clear_deallocating(id obj)
{
    assert(obj);

    if (obj->isTaggedPointer()) return;
    obj->clearDeallocating();
}

bool
_objc_rootReleaseWasZero(id obj)
{
    assert(obj);

    return obj->rootReleaseShouldDealloc();
}

id
_objc_rootAutorelease(id obj)
{
    assert(obj);
    return obj->rootAutorelease();
}

uintptr_t
_objc_rootRetainCount(id obj)
{
    assert(obj);

    return obj->rootRetainCount();
}

id
_objc_rootRetain(id obj)
{
    assert(obj);

    return obj->rootRetain();
}

void
_objc_rootRelease(id obj)
{
    assert(obj);

    obj->rootRelease();
}
// Allocate an instance of `cls`, optionally from `zone` (pre-OBJC2 only;
// OBJC2 ignores zones). Invokes the bad-alloc handler on failure.
id
_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
{
    id obj;

#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    (void)zone;
    obj = class_createInstance(cls, 0);
#else
    if (!zone) {
        obj = class_createInstance(cls, 0);
    }
    else {
        obj = class_createInstanceFromZone(cls, 0, zone);
    }
#endif

    if (slowpath(!obj)) obj = callBadAllocHandler(cls);
    return obj;
}
// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
// shortcutting optimizations.
// checkNil: return nil for a nil class instead of crashing.
// allocWithZone: which message to send when no shortcut applies.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
    if (slowpath(checkNil && !cls)) return nil;

#if __OBJC2__
    if (fastpath(!cls->ISA()->hasCustomAWZ())) {
        // No alloc/allocWithZone implementation. Go straight to the allocator.
        // fixme store hasCustomAWZ in the non-meta class and
        // add it to canAllocFast's summary
        if (fastpath(cls->canAllocFast())) {
            // No ctors, raw isa, etc. Go straight to the metal.
            bool dtor = cls->hasCxxDtor();
            id obj = (id)calloc(1, cls->bits.fastInstanceSize());
            if (slowpath(!obj)) return callBadAllocHandler(cls);
            obj->initInstanceIsa(cls, dtor);
            return obj;
        }
        else {
            // Has ctor or raw isa or something. Use the slower path.
            id obj = class_createInstance(cls, 0);
            if (slowpath(!obj)) return callBadAllocHandler(cls);
            return obj;
        }
    }
#endif

    // No shortcuts available.
    if (allocWithZone) return [cls allocWithZone:nil];
    return [cls alloc];
}
// Base class implementation of +alloc. cls is not nil.
// Calls [cls allocWithZone:nil].
id
_objc_rootAlloc(Class cls)
{
    return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
}

// Calls [cls alloc]. Compiler-emitted entry point; nil class returns nil.
id
objc_alloc(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
}

// Calls [cls allocWithZone:nil]. Nil class returns nil.
id
objc_allocWithZone(Class cls)
{
    return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
}
// Base implementation of -dealloc: destroy the instance.
void
_objc_rootDealloc(id obj)
{
    assert(obj);

    obj->rootDealloc();
}

// Garbage collection is unsupported; calling this is fatal.
void
_objc_rootFinalize(id obj __unused)
{
    assert(obj);
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}

// Base implementation of -init: returns the object unchanged.
id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}
// The malloc zone an object belongs to. Under OBJC2 zones are ignored,
// so this is always the default zone.
malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}
// Base implementation of -hash: identity hash (the pointer value).
uintptr_t
_objc_rootHash(id obj)
{
    return (uintptr_t)obj;
}
// Public pool entry points and their legacy underscore aliases.

// Push a new autorelease pool; returns a token for the matching pop.
void *
objc_autoreleasePoolPush(void)
{
    return AutoreleasePoolPage::push();
}

// Pop the pool identified by `ctxt` (a token from push).
void
objc_autoreleasePoolPop(void *ctxt)
{
    AutoreleasePoolPage::pop(ctxt);
}

void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}

void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}

// Debug: dump the current thread's autorelease pools to the log.
void
_objc_autoreleasePoolPrint(void)
{
    AutoreleasePoolPage::printAll();
}
// Same as objc_release but suitable for tail-calling
// if you need the value back and don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_releaseAndReturn(id obj)
{
    objc_release(obj);
    return obj;
}

// Same as objc_retainAutorelease but suitable for tail-calling
// if you don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_retainAutoreleaseAndReturn(id obj)
{
    return objc_retainAutorelease(obj);
}
// Prepare a value at +1 for return through a +0 autoreleasing convention.
// If the caller cooperates (optimized return), skip the autorelease.
id
objc_autoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;

    return objc_autorelease(obj);
}

// Prepare a value at +0 for return through a +0 autoreleasing convention.
id
objc_retainAutoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;

    // not objc_autoreleaseReturnValue(objc_retain(obj))
    // because we don't need another optimization attempt
    return objc_retainAutoreleaseAndReturn(obj);
}

// Accept a value returned through a +0 autoreleasing convention for use at +1.
id
objc_retainAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;

    return objc_retain(obj);
}

// Accept a value returned through a +0 autoreleasing convention for use at +0.
id
objc_unsafeClaimAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;

    return objc_releaseAndReturn(obj);
}

// Retain then autorelease: the conventional way to return a +0 value.
id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}
// dispatch-compatible trampoline (void* context) that sends -dealloc
// to the object passed as context.
void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    [obj dealloc];
}
// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object) { return object; }
// Runtime startup: initialize the autorelease pool TLS key and the
// side tables (order as written; pool init registers its TLS destructor).
void arr_init(void)
{
    AutoreleasePoolPage::init();
    SideTableInit();
}
  1597. #if SUPPORT_TAGGED_POINTERS
// Placeholder for old debuggers. When they inspect an
// extended tagged pointer object they will see this isa.
// Tagged pointers have no refcount state, so retain/release/autorelease
// are identity/no-op.

@interface __NSUnrecognizedTaggedPointer : NSObject
@end

@implementation __NSUnrecognizedTaggedPointer
+(void) load { }
-(id) retain { return self; }
-(oneway void) release { }
-(id) autorelease { return self; }
@end
  1608. #endif
  1609. @implementation NSObject
  1610. + (void)load {
  1611. }
  1612. + (void)initialize {
  1613. }
  1614. + (id)self {
  1615. return (id)self;
  1616. }
  1617. - (id)self {
  1618. return self;
  1619. }
  1620. + (Class)class {
  1621. return self;
  1622. }
  1623. - (Class)class {
  1624. return object_getClass(self);
  1625. }
  1626. + (Class)superclass {
  1627. return self->superclass;
  1628. }
  1629. - (Class)superclass {
  1630. return [self class]->superclass;
  1631. }
  1632. + (BOOL)isMemberOfClass:(Class)cls {
  1633. return object_getClass((id)self) == cls;
  1634. }
  1635. - (BOOL)isMemberOfClass:(Class)cls {
  1636. return [self class] == cls;
  1637. }
  1638. + (BOOL)isKindOfClass:(Class)cls {
  1639. for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
  1640. if (tcls == cls) return YES;
  1641. }
  1642. return NO;
  1643. }
  1644. - (BOOL)isKindOfClass:(Class)cls {
  1645. for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
  1646. if (tcls == cls) return YES;
  1647. }
  1648. return NO;
  1649. }
  1650. + (BOOL)isSubclassOfClass:(Class)cls {
  1651. for (Class tcls = self; tcls; tcls = tcls->superclass) {
  1652. if (tcls == cls) return YES;
  1653. }
  1654. return NO;
  1655. }
  1656. + (BOOL)isAncestorOfObject:(NSObject *)obj {
  1657. for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
  1658. if (tcls == self) return YES;
  1659. }
  1660. return NO;
  1661. }
  1662. + (BOOL)instancesRespondToSelector:(SEL)sel {
  1663. if (!sel) return NO;
  1664. return class_respondsToSelector(self, sel);
  1665. }
  1666. + (BOOL)respondsToSelector:(SEL)sel {
  1667. if (!sel) return NO;
  1668. return class_respondsToSelector_inst(object_getClass(self), sel, self);
  1669. }
  1670. - (BOOL)respondsToSelector:(SEL)sel {
  1671. if (!sel) return NO;
  1672. return class_respondsToSelector_inst([self class], sel, self);
  1673. }
  1674. + (BOOL)conformsToProtocol:(Protocol *)protocol {
  1675. if (!protocol) return NO;
  1676. for (Class tcls = self; tcls; tcls = tcls->superclass) {
  1677. if (class_conformsToProtocol(tcls, protocol)) return YES;
  1678. }
  1679. return NO;
  1680. }
  1681. - (BOOL)conformsToProtocol:(Protocol *)protocol {
  1682. if (!protocol) return NO;
  1683. for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
  1684. if (class_conformsToProtocol(tcls, protocol)) return YES;
  1685. }
  1686. return NO;
  1687. }
  1688. + (NSUInteger)hash {
  1689. return _objc_rootHash(self);
  1690. }
  1691. - (NSUInteger)hash {
  1692. return _objc_rootHash(self);
  1693. }
  1694. + (BOOL)isEqual:(id)obj {
  1695. return obj == (id)self;
  1696. }
  1697. - (BOOL)isEqual:(id)obj {
  1698. return obj == self;
  1699. }
  1700. + (BOOL)isFault {
  1701. return NO;
  1702. }
  1703. - (BOOL)isFault {
  1704. return NO;
  1705. }
  1706. + (BOOL)isProxy {
  1707. return NO;
  1708. }
  1709. - (BOOL)isProxy {
  1710. return NO;
  1711. }
  1712. + (IMP)instanceMethodForSelector:(SEL)sel {
  1713. if (!sel) [self doesNotRecognizeSelector:sel];
  1714. return class_getMethodImplementation(self, sel);
  1715. }
  1716. + (IMP)methodForSelector:(SEL)sel {
  1717. if (!sel) [self doesNotRecognizeSelector:sel];
  1718. return object_getMethodImplementation((id)self, sel);
  1719. }
  1720. - (IMP)methodForSelector:(SEL)sel {
  1721. if (!sel) [self doesNotRecognizeSelector:sel];
  1722. return object_getMethodImplementation(self, sel);
  1723. }
  1724. + (BOOL)resolveClassMethod:(SEL)sel {
  1725. return NO;
  1726. }
  1727. + (BOOL)resolveInstanceMethod:(SEL)sel {
  1728. return NO;
  1729. }
  1730. // Replaced by CF (throws an NSException)
  1731. + (void)doesNotRecognizeSelector:(SEL)sel {
  1732. _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
  1733. class_getName(self), sel_getName(sel), self);
  1734. }
  1735. // Replaced by CF (throws an NSException)
  1736. - (void)doesNotRecognizeSelector:(SEL)sel {
  1737. _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
  1738. object_getClassName(self), sel_getName(sel), self);
  1739. }
  1740. + (id)performSelector:(SEL)sel {
  1741. if (!sel) [self doesNotRecognizeSelector:sel];
  1742. return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
  1743. }
  1744. + (id)performSelector:(SEL)sel withObject:(id)obj {
  1745. if (!sel) [self doesNotRecognizeSelector:sel];
  1746. return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
  1747. }
  1748. + (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
  1749. if (!sel) [self doesNotRecognizeSelector:sel];
  1750. return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
  1751. }
  1752. - (id)performSelector:(SEL)sel {
  1753. if (!sel) [self doesNotRecognizeSelector:sel];
  1754. return ((id(*)(id, SEL))objc_msgSend)(self, sel);
  1755. }
  1756. - (id)performSelector:(SEL)sel withObject:(id)obj {
  1757. if (!sel) [self doesNotRecognizeSelector:sel];
  1758. return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
  1759. }
  1760. - (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
  1761. if (!sel) [self doesNotRecognizeSelector:sel];
  1762. return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
  1763. }
  1764. // Replaced by CF (returns an NSMethodSignature)
  1765. + (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
  1766. _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
  1767. "not available without CoreFoundation");
  1768. }
  1769. // Replaced by CF (returns an NSMethodSignature)
  1770. + (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
  1771. _objc_fatal("+[NSObject methodSignatureForSelector:] "
  1772. "not available without CoreFoundation");
  1773. }
  1774. // Replaced by CF (returns an NSMethodSignature)
  1775. - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
  1776. _objc_fatal("-[NSObject methodSignatureForSelector:] "
  1777. "not available without CoreFoundation");
  1778. }
  1779. + (void)forwardInvocation:(NSInvocation *)invocation {
  1780. [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
  1781. }
  1782. - (void)forwardInvocation:(NSInvocation *)invocation {
  1783. [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
  1784. }
  1785. + (id)forwardingTargetForSelector:(SEL)sel {
  1786. return nil;
  1787. }
  1788. - (id)forwardingTargetForSelector:(SEL)sel {
  1789. return nil;
  1790. }
  1791. // Replaced by CF (returns an NSString)
  1792. + (NSString *)description {
  1793. return nil;
  1794. }
  1795. // Replaced by CF (returns an NSString)
  1796. - (NSString *)description {
  1797. return nil;
  1798. }
  1799. + (NSString *)debugDescription {
  1800. return [self description];
  1801. }
  1802. - (NSString *)debugDescription {
  1803. return [self description];
  1804. }
  1805. + (id)new {
  1806. return [callAlloc(self, false/*checkNil*/) init];
  1807. }
  1808. + (id)retain {
  1809. return (id)self;
  1810. }
  1811. // Replaced by ObjectAlloc
  1812. - (id)retain {
  1813. return ((id)self)->rootRetain();
  1814. }
  1815. + (BOOL)_tryRetain {
  1816. return YES;
  1817. }
  1818. // Replaced by ObjectAlloc
  1819. - (BOOL)_tryRetain {
  1820. return ((id)self)->rootTryRetain();
  1821. }
  1822. + (BOOL)_isDeallocating {
  1823. return NO;
  1824. }
  1825. - (BOOL)_isDeallocating {
  1826. return ((id)self)->rootIsDeallocating();
  1827. }
  1828. + (BOOL)allowsWeakReference {
  1829. return YES;
  1830. }
  1831. + (BOOL)retainWeakReference {
  1832. return YES;
  1833. }
  1834. - (BOOL)allowsWeakReference {
  1835. return ! [self _isDeallocating];
  1836. }
  1837. - (BOOL)retainWeakReference {
  1838. return [self _tryRetain];
  1839. }
  1840. + (oneway void)release {
  1841. }
  1842. // Replaced by ObjectAlloc
  1843. - (oneway void)release {
  1844. ((id)self)->rootRelease();
  1845. }
  1846. + (id)autorelease {
  1847. return (id)self;
  1848. }
  1849. // Replaced by ObjectAlloc
  1850. - (id)autorelease {
  1851. return ((id)self)->rootAutorelease();
  1852. }
  1853. + (NSUInteger)retainCount {
  1854. return ULONG_MAX;
  1855. }
  1856. - (NSUInteger)retainCount {
  1857. return ((id)self)->rootRetainCount();
  1858. }
  1859. + (id)alloc {
  1860. return _objc_rootAlloc(self);
  1861. }
  1862. // Replaced by ObjectAlloc
  1863. + (id)allocWithZone:(struct _NSZone *)zone {
  1864. return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
  1865. }
  1866. // Replaced by CF (throws an NSException)
  1867. + (id)init {
  1868. return (id)self;
  1869. }
  1870. - (id)init {
  1871. return _objc_rootInit(self);
  1872. }
  1873. // Replaced by CF (throws an NSException)
  1874. + (void)dealloc {
  1875. }
  1876. // Replaced by NSZombies
  1877. - (void)dealloc {
  1878. _objc_rootDealloc(self);
  1879. }
  1880. // Previously used by GC. Now a placeholder for binary compatibility.
  1881. - (void) finalize {
  1882. }
  1883. + (struct _NSZone *)zone {
  1884. return (struct _NSZone *)_objc_rootZone(self);
  1885. }
  1886. - (struct _NSZone *)zone {
  1887. return (struct _NSZone *)_objc_rootZone(self);
  1888. }
  1889. + (id)copy {
  1890. return (id)self;
  1891. }
  1892. + (id)copyWithZone:(struct _NSZone *)zone {
  1893. return (id)self;
  1894. }
  1895. - (id)copy {
  1896. return [(id)self copyWithZone:nil];
  1897. }
  1898. + (id)mutableCopy {
  1899. return (id)self;
  1900. }
  1901. + (id)mutableCopyWithZone:(struct _NSZone *)zone {
  1902. return (id)self;
  1903. }
  1904. - (id)mutableCopy {
  1905. return [(id)self mutableCopyWithZone:nil];
  1906. }
  1907. @end