  1. /*
  2. * Copyright (c) 2007 Apple Inc. All Rights Reserved.
  3. *
  4. * @APPLE_LICENSE_HEADER_START@
  5. *
  6. * This file contains Original Code and/or Modifications of Original Code
  7. * as defined in and that are subject to the Apple Public Source License
  8. * Version 2.0 (the 'License'). You may not use this file except in
  9. * compliance with the License. Please obtain a copy of the License at
  10. * http://www.opensource.apple.com/apsl/ and read it before using this
  11. * file.
  12. *
  13. * The Original Code and all software distributed under the License are
  14. * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  15. * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  16. * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  18. * Please see the License for the specific language governing rights and
  19. * limitations under the License.
  20. *
  21. * @APPLE_LICENSE_HEADER_END@
  22. */
  23. /***********************************************************************
  24. * objc-os.h
  25. * OS portability layer.
  26. **********************************************************************/
  27. #ifndef _OBJC_OS_H
  28. #define _OBJC_OS_H
  29. #include <atomic>
  30. #include <TargetConditionals.h>
  31. #include "objc-config.h"
  32. #include "objc-private.h"
  33. #ifdef __LP64__
  34. # define WORD_SHIFT 3UL
  35. # define WORD_MASK 7UL
  36. # define WORD_BITS 64
  37. #else
  38. # define WORD_SHIFT 2UL
  39. # define WORD_MASK 3UL
  40. # define WORD_BITS 32
  41. #endif
  42. static inline uint32_t word_align(uint32_t x) {
  43. return (x + WORD_MASK) & ~WORD_MASK;
  44. }
  45. static inline size_t word_align(size_t x) {
  46. return (x + WORD_MASK) & ~WORD_MASK;
  47. }
  48. static inline size_t align16(size_t x) {
  49. return (x + size_t(15)) & ~size_t(15);
  50. }
// Mix-in for classes that must not be copied.
class nocopy_t {
  private:
    // Copy construction and copy assignment are deleted outright.
    nocopy_t(const nocopy_t&) = delete;
    const nocopy_t& operator=(const nocopy_t&) = delete;

  protected:
    // Only derived classes may construct/destroy instances.
    constexpr nocopy_t() = default;
    ~nocopy_t() = default;
};
// Version of std::atomic that does not allow implicit conversions
// to/from the wrapped type, and requires an explicit memory order
// be passed to load() and store().
template <typename T>
struct explicit_atomic : public std::atomic<T> {
    explicit explicit_atomic(T initial) noexcept : std::atomic<T>(std::move(initial)) {}

    // Implicit reads are forbidden; callers must use load(order).
    operator T() const = delete;

    T load(std::memory_order order) const noexcept {
        return std::atomic<T>::load(order);
    }
    void store(T desired, std::memory_order order) noexcept {
        std::atomic<T>::store(desired, order);
    }

    // Convert a normal pointer to an atomic pointer. This is a
    // somewhat dodgy thing to do, but if the atomic type is lock
    // free and the same size as the non-atomic type, we know the
    // representations are the same, and the compiler generates good
    // code.
    static explicit_atomic<T> *from_pointer(T *ptr) {
        static_assert(sizeof(explicit_atomic<T> *) == sizeof(T *),
                      "Size of atomic must match size of original");
        explicit_atomic<T> *atomic = (explicit_atomic<T> *)ptr;
        ASSERT(atomic->is_lock_free());
        return atomic;
    }
};
  86. #if TARGET_OS_MAC
  87. # define OS_UNFAIR_LOCK_INLINE 1
  88. # ifndef __STDC_LIMIT_MACROS
  89. # define __STDC_LIMIT_MACROS
  90. # endif
  91. # include <stdio.h>
  92. # include <stdlib.h>
  93. # include <stdint.h>
  94. # include <stdarg.h>
  95. # include <string.h>
  96. # include <ctype.h>
  97. # include <errno.h>
  98. # include <dlfcn.h>
  99. # include <fcntl.h>
  100. # include <assert.h>
  101. # include <limits.h>
  102. # include <syslog.h>
  103. # include <unistd.h>
  104. # include <pthread.h>
  105. # include <crt_externs.h>
  106. # undef check
  107. # include <Availability.h>
  108. # include <TargetConditionals.h>
  109. # include <sys/mman.h>
  110. # include <sys/time.h>
  111. # include <sys/stat.h>
  112. # include <sys/param.h>
  113. # include <sys/reason.h>
  114. # include <mach/mach.h>
  115. # include <mach/vm_param.h>
  116. # include <mach/mach_time.h>
  117. # include <mach-o/dyld.h>
  118. # include <mach-o/ldsyms.h>
  119. # include <mach-o/loader.h>
  120. # include <mach-o/getsect.h>
  121. # include <mach-o/dyld_priv.h>
  122. # include <malloc/malloc.h>
  123. # include <os/lock_private.h>
  124. # include <libkern/OSAtomic.h>
  125. # include <libkern/OSCacheControl.h>
  126. # include <System/pthread_machdep.h>
  127. # include "objc-probes.h" // generated dtrace probe definitions.
// Some libc functions call objc_msgSend()
// so we can't use them without deadlocks.
void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;

#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NEVER_INLINE __attribute__((noinline))

// Branch-prediction hints: the wrapped condition is expected to be
// almost always true (fastpath) or almost always false (slowpath).
#define fastpath(x) (__builtin_expect(bool(x), 1))
#define slowpath(x) (__builtin_expect(bool(x), 0))

// Pointer-width add with carry-in and carry-out, via the compiler
// builtin (carryout receives 0 or 1).
static ALWAYS_INLINE uintptr_t
addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_addcl(lhs, rhs, carryin, carryout);
}

// Pointer-width subtract with borrow-in and borrow-out, via the
// compiler builtin.
static ALWAYS_INLINE uintptr_t
subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
    return __builtin_subcl(lhs, rhs, carryin, carryout);
}
#if __arm64__ && !__arm64e__

// Pointer-sized load-exclusive / store-exclusive primitives used to
// build CAS-style update loops. On plain arm64 these map onto the
// hardware exclusive-monitor builtins.

static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return __builtin_arm_ldrex(src);
}

// Returns true if the exclusive store succeeded. oldvalue is unused
// here: the hardware monitor, not a value comparison, decides success.
static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    return !__builtin_arm_strex(value, dst);
}

// As StoreExclusive, but with release semantics (stlex).
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
{
    return !__builtin_arm_stlex(value, dst);
}

// Abandon the exclusive reservation without storing.
static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst __unused)
{
    __builtin_arm_clrex();
}

#else

// Emulation for other targets: LoadExclusive is a relaxed atomic load,
// and the Store*Exclusive calls are weak compare-exchanges against
// oldvalue (so here oldvalue IS significant).

static ALWAYS_INLINE
uintptr_t
LoadExclusive(uintptr_t *src)
{
    return __c11_atomic_load((_Atomic(uintptr_t) *)src, __ATOMIC_RELAXED);
}

static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

// Release ordering on success, relaxed on failure.
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

// Nothing to clear in the emulated version.
static ALWAYS_INLINE
void
ClearExclusive(uintptr_t *dst __unused)
{
}

#endif
  196. #if !TARGET_OS_IPHONE
  197. # include <CrashReporterClient.h>
  198. #else
  199. // CrashReporterClient not yet available on iOS
  200. __BEGIN_DECLS
  201. extern const char *CRSetCrashLogMessage(const char *msg);
  202. extern const char *CRGetCrashLogMessage(void);
  203. __END_DECLS
  204. #endif
  205. # if __cplusplus
  206. # include <vector>
  207. # include <algorithm>
  208. # include <functional>
  209. using namespace std;
  210. # endif
  211. # define PRIVATE_EXTERN __attribute__((visibility("hidden")))
  212. # undef __private_extern__
  213. # define __private_extern__ use_PRIVATE_EXTERN_instead
  214. # undef private_extern
  215. # define private_extern use_PRIVATE_EXTERN_instead
  216. /* Use this for functions that are intended to be breakpoint hooks.
  217. If you do not, the compiler may optimize them away.
  218. BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
  219. # define BREAKPOINT_FUNCTION(prototype) \
  220. OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
  221. prototype { asm(""); }
  222. #elif TARGET_OS_WIN32
  223. # define WINVER 0x0501 // target Windows XP and later
  224. # define _WIN32_WINNT 0x0501 // target Windows XP and later
  225. # define WIN32_LEAN_AND_MEAN
  226. // hack: windef.h typedefs BOOL as int
  227. # define BOOL WINBOOL
  228. # include <windows.h>
  229. # undef BOOL
  230. # include <stdio.h>
  231. # include <stdlib.h>
  232. # include <stdint.h>
  233. # include <stdarg.h>
  234. # include <string.h>
  235. # include <assert.h>
  236. # include <malloc.h>
  237. # include <Availability.h>
  238. # if __cplusplus
  239. # include <vector>
  240. # include <algorithm>
  241. # include <functional>
  242. using namespace std;
  243. # define __BEGIN_DECLS extern "C" {
  244. # define __END_DECLS }
  245. # else
  246. # define __BEGIN_DECLS /*empty*/
  247. # define __END_DECLS /*empty*/
  248. # endif
  249. # define PRIVATE_EXTERN
  250. # define __attribute__(x)
  251. # define inline __inline
  252. /* Use this for functions that are intended to be breakpoint hooks.
  253. If you do not, the compiler may optimize them away.
  254. BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
  255. # define BREAKPOINT_FUNCTION(prototype) \
  256. __declspec(noinline) prototype { __asm { } }
  257. /* stub out dtrace probes */
  258. # define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
  259. # define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
  260. #else
  261. # error unknown OS
  262. #endif
  263. #include <objc/objc.h>
  264. #include <objc/objc-api.h>
  265. extern void _objc_fatal(const char *fmt, ...)
  266. __attribute__((noreturn, cold, format (printf, 1, 2)));
  267. extern void _objc_fatal_with_reason(uint64_t reason, uint64_t flags,
  268. const char *fmt, ...)
  269. __attribute__((noreturn, cold, format (printf, 3, 4)));
  270. #define INIT_ONCE_PTR(var, create, delete) \
  271. do { \
  272. if (var) break; \
  273. typeof(var) v = create; \
  274. while (!var) { \
  275. if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
  276. goto done; \
  277. } \
  278. } \
  279. delete; \
  280. done:; \
  281. } while (0)
  282. #define INIT_ONCE_32(var, create, delete) \
  283. do { \
  284. if (var) break; \
  285. typeof(var) v = create; \
  286. while (!var) { \
  287. if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \
  288. goto done; \
  289. } \
  290. } \
  291. delete; \
  292. done:; \
  293. } while (0)
  294. // Thread keys reserved by libc for our use.
  295. #if defined(__PTK_FRAMEWORK_OBJC_KEY0)
  296. # define SUPPORT_DIRECT_THREAD_KEYS 1
  297. # define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
  298. # define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
  299. # define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
  300. # define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
  301. # if SUPPORT_RETURN_AUTORELEASE
  302. # define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
  303. # endif
  304. #else
  305. # define SUPPORT_DIRECT_THREAD_KEYS 0
  306. #endif
  307. #if TARGET_OS_WIN32
// Compiler compatibility

// OS compatibility

// Map missing POSIX/BSD calls onto their Windows CRT equivalents.
#define strdup _strdup
#define issetugid() 0
#define MIN(x, y) ((x) < (y) ? (x) : (y))
static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); }
static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); }

int asprintf(char **dstp, const char *format, ...);

// Stub malloc zones: Windows has a single CRT heap, so every "zone" is
// the sentinel (malloc_zone_t)-1 and the zone argument is ignored.
typedef void * malloc_zone_t;

static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; }
static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); }
static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); }
static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); }
static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); }
static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; }
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }

// OSAtomic

// Compare-and-swap and inc/dec shims built on the Interlocked* APIs.

static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst)
{
    void *original = InterlockedCompareExchangePointer(dst, newp, oldp);
    return (original == oldp);
}

static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst)
{
    long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl);
    return (original == oldl);
}

static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst)
{
    // Returns the decremented value, matching OSAtomic semantics.
    return InterlockedDecrement((volatile long *)dst);
}

static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst)
{
    // Returns the incremented value, matching OSAtomic semantics.
    return InterlockedIncrement((volatile long *)dst);
}
// Internal data types

typedef DWORD objc_thread_t;  // thread ID

// Nonzero when both thread IDs name the same thread.
static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
    return t1 == t2;
}
static __inline objc_thread_t objc_thread_self(void) {
    return GetCurrentThreadId();
}

// TLS key plus the destructor it was registered with. TlsAlloc has no
// destructor support, so the dtor is carried alongside the slot.
typedef struct {
    DWORD key;
    void (*dtor)(void *);
} tls_key_t;

static __inline tls_key_t tls_create(void (*dtor)(void*)) {
    // fixme need dtor registry for DllMain to call on thread detach
    tls_key_t k;
    k.key = TlsAlloc();
    k.dtor = dtor;
    return k;
}
static __inline void *tls_get(tls_key_t k) {
    return TlsGetValue(k.key);
}
static __inline void tls_set(tls_key_t k, void *value) {
    TlsSetValue(k.key, value);
}

// Plain mutex over a lazily-created CRITICAL_SECTION.
// NOTE(review): the lazy mutex_init() below looks racy if two threads
// hit an uninitialized lock simultaneously — confirm initialization
// order with callers.
typedef struct {
    CRITICAL_SECTION *lock;
} mutex_t;
#define MUTEX_INITIALIZER {0};

extern void mutex_init(mutex_t *m);

static __inline int _mutex_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    EnterCriticalSection(m->lock);
    return 0;
}
static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
    // fixme error check
    if (!m->lock) {
        mutex_init(m);
    }
    return TryEnterCriticalSection(m->lock);
}
static __inline int _mutex_unlock_nodebug(mutex_t *m) {
    // fixme error check
    LeaveCriticalSection(m->lock);
    return 0;
}

// Spinlocks are just mutexes on Windows.
typedef mutex_t spinlock_t;
#define spinlock_lock(l) mutex_lock(l)
#define spinlock_unlock(l) mutex_unlock(l)
#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER

// Recursive mutex over a Win32 mutex HANDLE (which is recursive by
// nature). Must be initialized via recursive_mutex_init() before use.
typedef struct {
    HANDLE mutex;
} recursive_mutex_t;
#define RECURSIVE_MUTEX_INITIALIZER {0};
#define RECURSIVE_MUTEX_NOT_LOCKED 1

extern void recursive_mutex_init(recursive_mutex_t *m);

static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
    ASSERT(m->mutex);
    return WaitForSingleObject(m->mutex, INFINITE);
}
static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
    ASSERT(m->mutex);
    return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
    // Returns RECURSIVE_MUTEX_NOT_LOCKED if this thread didn't hold it.
    ASSERT(m->mutex);
    return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED;
}
  421. /*
  422. typedef HANDLE mutex_t;
  423. static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
  424. static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
  425. static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
  426. static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
  427. */
// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
// Vista-only CONDITION_VARIABLE would be better
typedef struct {
    HANDLE mutex;
    HANDLE waiters;         // semaphore for those in cond_wait()
    HANDLE waitersDone;     // auto-reset event after everyone gets a broadcast
    CRITICAL_SECTION waitCountLock;  // guards waitCount and didBroadcast
    unsigned int waitCount;
    int didBroadcast;
} monitor_t;
#define MONITOR_INITIALIZER { 0 }
#define MONITOR_NOT_ENTERED 1

extern int monitor_init(monitor_t *c);

// Lazily initialize the monitor, then acquire its mutex.
static inline int _monitor_enter_nodebug(monitor_t *c) {
    if (!c->mutex) {
        int err = monitor_init(c);
        if (err) return err;
    }
    return WaitForSingleObject(c->mutex, INFINITE);
}

// Release the monitor's mutex; MONITOR_NOT_ENTERED if not held.
static inline int _monitor_leave_nodebug(monitor_t *c) {
    if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
    else return 0;
}

// Condition wait: register as a waiter, atomically drop the mutex and
// block on the waiters semaphore, then reacquire the mutex on wakeup.
// The last waiter of a broadcast signals waitersDone for fairness.
static inline int _monitor_wait_nodebug(monitor_t *c) {
    int last;
    EnterCriticalSection(&c->waitCountLock);
    c->waitCount++;
    LeaveCriticalSection(&c->waitCountLock);

    SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE);

    EnterCriticalSection(&c->waitCountLock);
    c->waitCount--;
    last = c->didBroadcast && c->waitCount == 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (last) {
        // tell broadcaster that all waiters have awoken
        SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE);
    } else {
        WaitForSingleObject(c->mutex, INFINITE);
    }

    // fixme error checking
    return 0;
}

// Wake one waiter, if any. Caller should hold the monitor's mutex.
static inline int monitor_notify(monitor_t *c) {
    int haveWaiters;

    EnterCriticalSection(&c->waitCountLock);
    haveWaiters = c->waitCount > 0;
    LeaveCriticalSection(&c->waitCountLock);

    if (haveWaiters) {
        ReleaseSemaphore(c->waiters, 1, 0);
    }

    // fixme error checking
    return 0;
}

// Wake all waiters, then block until each one has moved from the
// waiters semaphore back to contending on the mutex.
static inline int monitor_notifyAll(monitor_t *c) {
    EnterCriticalSection(&c->waitCountLock);
    if (c->waitCount == 0) {
        LeaveCriticalSection(&c->waitCountLock);
        return 0;
    }
    c->didBroadcast = 1;
    ReleaseSemaphore(c->waiters, c->waitCount, 0);
    LeaveCriticalSection(&c->waitCountLock);

    // fairness: wait for everyone to move from waiters to mutex
    WaitForSingleObject(c->waitersDone, INFINITE);
    // not under waitCountLock, but still under mutex
    c->didBroadcast = 0;

    // fixme error checking
    return 0;
}
  498. typedef IMAGE_DOS_HEADER headerType;
  499. // fixme YES bundle? NO bundle? sometimes?
  500. #define headerIsBundle(hi) YES
  501. OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase;
  502. #define libobjc_header ((headerType *)&__ImageBase)
  503. // Prototypes
  504. #elif TARGET_OS_MAC
  505. // OS headers
  506. #include <mach-o/loader.h>
  507. #ifndef __LP64__
  508. # define SEGMENT_CMD LC_SEGMENT
  509. #else
  510. # define SEGMENT_CMD LC_SEGMENT_64
  511. #endif
  512. #ifndef VM_MEMORY_OBJC_DISPATCHERS
  513. # define VM_MEMORY_OBJC_DISPATCHERS 0
  514. #endif
// Compiler compatibility

// OS compatibility

// Monotonic raw clock reading in nanoseconds (not subject to time
// adjustments, per CLOCK_MONOTONIC_RAW).
static inline uint64_t nanoseconds() {
    return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
}
  520. // Internal data types
  521. typedef pthread_t objc_thread_t;
  522. static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) {
  523. return pthread_equal(t1, t2);
  524. }
  525. typedef pthread_key_t tls_key_t;
  526. static inline tls_key_t tls_create(void (*dtor)(void*)) {
  527. tls_key_t k;
  528. pthread_key_create(&k, dtor);
  529. return k;
  530. }
  531. static inline void *tls_get(tls_key_t k) {
  532. return pthread_getspecific(k);
  533. }
  534. static inline void tls_set(tls_key_t k, void *value) {
  535. pthread_setspecific(k, value);
  536. }
#if SUPPORT_DIRECT_THREAD_KEYS

// True only for the reserved keys that are allowed to use the fast
// direct-TSD accessors below.
static inline bool is_valid_direct_key(tls_key_t k) {
    return (   k == SYNC_DATA_DIRECT_KEY
            || k == SYNC_COUNT_DIRECT_KEY
            || k == AUTORELEASE_POOL_KEY
            || k == _PTHREAD_TSD_SLOT_PTHREAD_SELF
#   if SUPPORT_RETURN_AUTORELEASE
            || k == RETURN_DISPOSITION_KEY
#   endif
               );
}

// Fast TLS read: uses pthread's direct TSD slot when available,
// falling back to the portable API otherwise.
static inline void *tls_get_direct(tls_key_t k)
{
    ASSERT(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        return _pthread_getspecific_direct(k);
    } else {
        return pthread_getspecific(k);
    }
}

// Fast TLS write; see tls_get_direct.
static inline void tls_set_direct(tls_key_t k, void *value)
{
    ASSERT(is_valid_direct_key(k));

    if (_pthread_has_direct_tsd()) {
        _pthread_setspecific_direct(k, value);
    } else {
        pthread_setspecific(k, value);
    }
}

// Current thread, read from the direct TSD self slot.
__attribute__((const))
static inline pthread_t objc_thread_self()
{
    return (pthread_t)tls_get_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
}
#else

__attribute__((const))
static inline pthread_t objc_thread_self()
{
    return pthread_self();
}
#endif // SUPPORT_DIRECT_THREAD_KEYS
template <bool Debug> class mutex_tt;
template <bool Debug> class monitor_tt;
template <bool Debug> class recursive_mutex_tt;

// Lock debugging (lockdebug_* checks) is compiled in for DEBUG builds
// only; the Debug template parameter below is driven by LOCKDEBUG.
#if DEBUG
#   define LOCKDEBUG 1
#else
#   define LOCKDEBUG 0
#endif

using spinlock_t = mutex_tt<LOCKDEBUG>;
using mutex_t = mutex_tt<LOCKDEBUG>;
using monitor_t = monitor_tt<LOCKDEBUG>;
using recursive_mutex_t = recursive_mutex_tt<LOCKDEBUG>;

// Use fork_unsafe_lock to get a lock that isn't
// acquired and released around fork().
// All fork-safe locks are checked in debug builds.
struct fork_unsafe_lock_t {
    constexpr fork_unsafe_lock_t() = default;
};
extern const fork_unsafe_lock_t fork_unsafe_lock;
  597. #include "objc-lockdebug.h"
// Non-recursive mutex built on os_unfair_lock. The Debug parameter
// selects whether the lockdebug_* checks are active.
template <bool Debug>
class mutex_tt : nocopy_t {
    os_unfair_lock mLock;

  public:
    constexpr mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) {
        lockdebug_remember_mutex(this);
    }

    // fork-unsafe variant: not registered with lockdebug, so it is not
    // acquired and released around fork().
    constexpr mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }

    void lock() {
        lockdebug_mutex_lock(this);

        // <rdar://problem/50384154>
        uint32_t opts = OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | OS_UNFAIR_LOCK_ADAPTIVE_SPIN;
        os_unfair_lock_lock_with_options_inline
            (&mLock, (os_unfair_lock_options_t)opts);
    }

    void unlock() {
        lockdebug_mutex_unlock(this);

        os_unfair_lock_unlock_inline(&mLock);
    }

    // Force the lock back to its freshly-initialized state without a
    // normal unlock. NOTE(review): presumably used when the owner can't
    // release it (e.g. after fork) — confirm with callers.
    void forceReset() {
        lockdebug_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_lock OS_UNFAIR_LOCK_INIT;
    }

    void assertLocked() {
        lockdebug_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_mutex_assert_unlocked(this);
    }

    // Address-ordered lock discipline for a pair of locks.
    static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        if (lock1 < lock2) {
            lock1->lock();
            lock2->lock();
        } else {
            lock2->lock();
            if (lock2 != lock1) lock1->lock();
        }
    }

    static void unlockTwo(mutex_tt *lock1, mutex_tt *lock2) {
        lock1->unlock();
        if (lock2 != lock1) lock2->unlock();
    }

    // Scoped lock and unlock
    class locker : nocopy_t {
        mutex_tt& lock;
      public:
        locker(mutex_tt& newLock)
            : lock(newLock) { lock.lock(); }
        ~locker() { lock.unlock(); }
    };

    // Either scoped lock and unlock, or NOP.
    class conditional_locker : nocopy_t {
        mutex_tt& lock;
        bool didLock;
      public:
        conditional_locker(mutex_tt& newLock, bool shouldLock)
            : lock(newLock), didLock(shouldLock)
        {
            if (shouldLock) lock.lock();
        }
        ~conditional_locker() { if (didLock) lock.unlock(); }
    };
};

using mutex_locker_t = mutex_tt<LOCKDEBUG>::locker;
using conditional_mutex_locker_t = mutex_tt<LOCKDEBUG>::conditional_locker;
// Recursive mutex built on os_unfair_recursive_lock; same lockdebug
// integration as mutex_tt.
template <bool Debug>
class recursive_mutex_tt : nocopy_t {
    os_unfair_recursive_lock mLock;

  public:
    constexpr recursive_mutex_tt() : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT) {
        lockdebug_remember_recursive_mutex(this);
    }

    // fork-unsafe variant: not registered with lockdebug.
    constexpr recursive_mutex_tt(const fork_unsafe_lock_t unsafe)
        : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT)
    { }

    void lock()
    {
        lockdebug_recursive_mutex_lock(this);

        os_unfair_recursive_lock_lock(&mLock);
    }

    void unlock()
    {
        lockdebug_recursive_mutex_unlock(this);

        os_unfair_recursive_lock_unlock(&mLock);
    }

    // Force the lock back to a freshly-initialized state without a
    // normal unlock (see mutex_tt::forceReset).
    void forceReset()
    {
        lockdebug_recursive_mutex_unlock(this);

        bzero(&mLock, sizeof(mLock));
        mLock = os_unfair_recursive_lock OS_UNFAIR_RECURSIVE_LOCK_INIT;
    }

    // Returns true and records the acquisition if the lock was taken.
    bool tryLock()
    {
        if (os_unfair_recursive_lock_trylock(&mLock)) {
            lockdebug_recursive_mutex_lock(this);
            return true;
        }
        return false;
    }

    // Returns true and records the release if this thread held the lock.
    bool tryUnlock()
    {
        if (os_unfair_recursive_lock_tryunlock4objc(&mLock)) {
            lockdebug_recursive_mutex_unlock(this);
            return true;
        }
        return false;
    }

    void assertLocked() {
        lockdebug_recursive_mutex_assert_locked(this);
    }

    void assertUnlocked() {
        lockdebug_recursive_mutex_assert_unlocked(this);
    }
};
// Monitor: a pthread mutex paired with a condition variable. Any
// pthread failure is fatal rather than reported to the caller.
template <bool Debug>
class monitor_tt {
    pthread_mutex_t mutex;
    pthread_cond_t cond;

  public:
    constexpr monitor_tt()
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    {
        lockdebug_remember_monitor(this);
    }

    // fork-unsafe variant: not registered with lockdebug.
    monitor_tt(const fork_unsafe_lock_t unsafe)
        : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
    { }

    void enter()
    {
        lockdebug_monitor_enter(this);

        int err = pthread_mutex_lock(&mutex);
        if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
    }

    void leave()
    {
        lockdebug_monitor_leave(this);

        int err = pthread_mutex_unlock(&mutex);
        if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
    }

    // Caller must have entered the monitor; reacquires it on wakeup.
    void wait()
    {
        lockdebug_monitor_wait(this);

        int err = pthread_cond_wait(&cond, &mutex);
        if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
    }

    void notify()
    {
        int err = pthread_cond_signal(&cond);
        if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
    }

    void notifyAll()
    {
        int err = pthread_cond_broadcast(&cond);
        if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
    }

    // Force mutex and condvar back to freshly-initialized state without
    // a normal leave (see mutex_tt::forceReset).
    void forceReset()
    {
        lockdebug_monitor_leave(this);

        bzero(&mutex, sizeof(mutex));
        bzero(&cond, sizeof(cond));
        mutex = pthread_mutex_t PTHREAD_MUTEX_INITIALIZER;
        cond = pthread_cond_t PTHREAD_COND_INITIALIZER;
    }

    void assertLocked()
    {
        lockdebug_monitor_assert_locked(this);
    }

    void assertUnlocked()
    {
        lockdebug_monitor_assert_unlocked(this);
    }
};
// semaphore_create formatted for INIT_ONCE use
// Creates a FIFO mach semaphore with an initial count of 0; any
// failure is fatal rather than reported to the caller.
static inline semaphore_t create_semaphore(void)
{
    semaphore_t sem;
    kern_return_t k;
    k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (k) _objc_fatal("semaphore_create failed (0x%x)", k);
    return sem;
}
// Mach-O structure aliases for the current pointer size.
#ifndef __LP64__
typedef struct mach_header headerType;
typedef struct segment_command segmentType;
typedef struct section sectionType;
#else
typedef struct mach_header_64 headerType;
typedef struct segment_command_64 segmentType;
typedef struct section_64 sectionType;
#endif
#define headerIsBundle(hi) (hi->mhdr()->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)

// Prototypes

/* Secure /tmp usage */
extern int secure_open(const char *filename, int flags, uid_t euid);
  795. #else
  796. #error unknown OS
  797. #endif
  798. static inline void *
  799. memdup(const void *mem, size_t len)
  800. {
  801. void *dup = malloc(len);
  802. memcpy(dup, mem, len);
  803. return dup;
  804. }
// strdup that doesn't copy read-only memory
// If str lives in memory that dyld reports as immutable, the original
// pointer is returned as-is; otherwise a malloc'd copy is returned.
// Release the result with freeIfMutable(), which applies the same test.
static inline char *
strdupIfMutable(const char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        return (char *)str;
    } else {
        return (char *)memdup(str, size);
    }
}
// free strdupIfMutable() result
// Frees str only if it is heap-allocated; immutable strings that
// strdupIfMutable() returned unchanged are left alone.
static inline void
freeIfMutable(char *str)
{
    size_t size = strlen(str) + 1;
    if (_dyld_is_memory_immutable(str, size)) {
        // nothing
    } else {
        free(str);
    }
}
  827. // nil-checking unsigned strdup
  828. static inline uint8_t *
  829. ustrdupMaybeNil(const uint8_t *str)
  830. {
  831. if (!str) return nil;
  832. return (uint8_t *)strdupIfMutable((char *)str);
  833. }
  834. // OS version checking:
  835. //
  836. // sdkVersion()
  837. // DYLD_OS_VERSION(mac, ios, tv, watch, bridge)
  838. // sdkIsOlderThan(mac, ios, tv, watch, bridge)
  839. // sdkIsAtLeast(mac, ios, tv, watch, bridge)
  840. //
  841. // This version order matches OBJC_AVAILABLE.
  842. #if TARGET_OS_OSX
  843. # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_MACOSX_VERSION_##x
  844. # define sdkVersion() dyld_get_program_sdk_version()
  845. #elif TARGET_OS_IOS
  846. # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##i
  847. # define sdkVersion() dyld_get_program_sdk_version()
  848. #elif TARGET_OS_TV
  849. // dyld does not currently have distinct constants for tvOS
  850. # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
  851. # define sdkVersion() dyld_get_program_sdk_version()
  852. #elif TARGET_OS_BRIDGE
  853. # if TARGET_OS_WATCH
  854. # error bridgeOS 1.0 not supported
  855. # endif
  856. // fixme don't need bridgeOS versioning yet
  857. # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
  858. # define sdkVersion() dyld_get_program_sdk_bridge_os_version()
  859. #elif TARGET_OS_WATCH
  860. # define DYLD_OS_VERSION(x, i, t, w, b) DYLD_WATCHOS_VERSION_##w
  861. // watchOS has its own API for compatibility reasons
  862. # define sdkVersion() dyld_get_program_sdk_watch_os_version()
  863. #else
  864. # error unknown OS
  865. #endif
  866. #define sdkIsOlderThan(x, i, t, w, b) \
  867. (sdkVersion() < DYLD_OS_VERSION(x, i, t, w, b))
  868. #define sdkIsAtLeast(x, i, t, w, b) \
  869. (sdkVersion() >= DYLD_OS_VERSION(x, i, t, w, b))
  870. // Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan()
  871. #define DYLD_MACOSX_VERSION_0 0
  872. #define DYLD_IOS_VERSION_0 0
  873. #define DYLD_TVOS_VERSION_0 0
  874. #define DYLD_WATCHOS_VERSION_0 0
  875. #define DYLD_BRIDGEOS_VERSION_0 0
  876. // Pretty-print a DYLD_*_VERSION_* constant.
  877. #define SDK_FORMAT "%hu.%hhu.%hhu"
  878. #define FORMAT_SDK(v) \
  879. (unsigned short)(((uint32_t)(v))>>16), \
  880. (unsigned char)(((uint32_t)(v))>>8), \
  881. (unsigned char)(((uint32_t)(v))>>0)
// fork() safety requires careful tracking of all locks.
// Our custom lock types check this in debug builds.
// Disallow direct use of all other lock types.
// (These typedefs re-declare the system lock types with
// UNAVAILABLE_ATTRIBUTE so any direct use fails to compile.)
typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE;
typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE;
typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE;
typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE;
  889. #endif