/*
 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __OS_LOCK_PRIVATE__
#define __OS_LOCK_PRIVATE__

#include <Availability.h>
#include <TargetConditionals.h>
#include <sys/cdefs.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <os/base_private.h>
#include <os/lock.h>

OS_ASSUME_NONNULL_BEGIN
/*! @header
 * Low-level lock SPI
 */

#define OS_LOCK_SPI_VERSION 20171006

/*!
 * @typedef os_lock_t
 *
 * @abstract
 * Pointer to one of the os_lock variants.
 */

#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)
#if defined(__cplusplus) && __cplusplus >= 201103L

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
		private: \
			OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
			uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
		public: \
			constexpr OS_LOCK(type)() : \
					osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
		} OS_LOCK(type)
#define OS_LOCK_INIT(type) {}

typedef OS_LOCK_STRUCT(base) {
protected:
	constexpr OS_LOCK(base)() {}
} *os_lock_t;

#else

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) { \
			OS_LOCK_TYPE_STRUCT(type) * osl_type; \
			uintptr_t _osl_##type##_opaque[size-1]; \
		} OS_LOCK(type)
#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }

#ifndef OS_LOCK_T_MEMBER
#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
#endif

typedef OS_TRANSPARENT_UNION union {
	OS_LOCK_T_MEMBER(base);
	OS_LOCK_T_MEMBER(unfair);
	OS_LOCK_T_MEMBER(nospin);
	OS_LOCK_T_MEMBER(spin);
	OS_LOCK_T_MEMBER(handoff);
} os_lock_t;

#endif
/*!
 * @typedef os_lock_unfair_s
 *
 * @abstract
 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The lock value contains
 * ownership information that the system may use to attempt to resolve priority
 * inversions.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
 * can potentially immediately reacquire the lock before a woken-up waiter gets
 * an opportunity to attempt to acquire the lock, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_UNFAIR_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
OS_LOCK_DECL(unfair, 2);
#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)
/*!
 * @typedef os_lock_nospin_s
 *
 * @abstract
 * os_lock variant that does not spin on contention but waits in the kernel to
 * be woken up by an unlock. No attempt to resolve priority inversions is made,
 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
 * can potentially immediately reacquire the lock before a woken-up waiter gets
 * an opportunity to attempt to acquire the lock, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_NOSPIN_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
OS_LOCK_DECL(nospin, 2);
#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)
/*!
 * @typedef os_lock_spin_s
 *
 * @abstract
 * Deprecated os_lock variant that on contention first spins trying to acquire
 * the lock, then depresses the priority of the current thread, and finally
 * blocks the thread waiting for the lock to become available. Equivalent to
 * OSSpinLock and equally not recommended; see the discussion in the
 * libkern/OSAtomic.h headerdoc.
 *
 * @discussion
 * Spinlocks are intended to be held only for very brief periods of time. The
 * critical section must not make syscalls and should avoid touching areas of
 * memory that may trigger a page fault, in particular if the critical section
 * may be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads.
 *
 * Must be initialized with OS_LOCK_SPIN_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(spin);
OS_LOCK_DECL(spin, 2);
#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)
/*!
 * @typedef os_lock_handoff_s
 *
 * @abstract
 * os_lock variant that on contention hands off the current kernel thread to the
 * lock-owning userspace thread (if it is not running), temporarily overriding
 * its priority and IO throttle if necessary.
 *
 * @discussion
 * Intended for use in limited circumstances where the critical section might
 * be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
 * be likely to encounter a priority inversion.
 *
 * IMPORTANT: This lock variant is NOT intended as a general replacement for all
 * uses of os_lock_spin_s or OSSpinLock.
 *
 * Must be initialized with OS_LOCK_HANDOFF_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
OS_LOCK_DECL(handoff, 2);
#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)
__BEGIN_DECLS

/*!
 * @function os_lock_lock
 *
 * @abstract
 * Locks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_lock(os_lock_t lock);
/*!
 * @function os_lock_trylock
 *
 * @abstract
 * Locks an os_lock variant if it is not already locked.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_lock_trylock(os_lock_t lock);
/*!
 * @function os_lock_unlock
 *
 * @abstract
 * Unlocks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_unlock(os_lock_t lock);
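/*
 * Example (illustrative sketch, not part of this SPI): protecting a shared
 * counter with an os_lock_unfair_s. The names `_example_lock`, `_example_count`
 * and `example_increment` are hypothetical.
 *
 *   static os_lock_unfair_s _example_lock = OS_LOCK_UNFAIR_INIT;
 *   static uint64_t _example_count;
 *
 *   void
 *   example_increment(void)
 *   {
 *       os_lock_lock(&_example_lock);
 *       _example_count++;
 *       os_lock_unlock(&_example_lock);
 *   }
 */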
/*! @group os_unfair_lock SPI
 *
 * @abstract
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The opaque lock value
 * contains thread ownership information that the system may use to attempt to
 * resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory, because the lock implementation relies on the
 * address of the lock value and the owning process.
 *
 * @discussion
 * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock. This may be
 * advantageous for performance reasons, but also makes starvation of waiters a
 * possibility.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT
 */
/*!
 * @typedef os_unfair_lock_options_t
 *
 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * This flag informs the runtime that the specified lock is used for data
 * synchronization and that the lock owner is always able to make progress
 * toward releasing the lock without the help of another thread in the same
 * process. This hint will cause the workqueue subsystem to not create new
 * threads to offset for threads waiting for the lock.
 *
 * When this flag is used, the code running under the critical section should
 * be well known and under your control (generally, it should not call into
 * framework code).
 *
 * @const OS_UNFAIR_LOCK_ADAPTIVE_SPIN
 * This flag allows the kernel to use adaptive spinning when the holder of the
 * lock is currently on core. It should only be used for locks where the
 * protected critical section is always extremely short.
 */
OS_OPTIONS(os_unfair_lock_options, uint32_t,
	OS_UNFAIR_LOCK_NONE OS_SWIFT_NAME(None)
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
	OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION OS_SWIFT_NAME(DataSynchronization)
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
	OS_UNFAIR_LOCK_ADAPTIVE_SPIN OS_SWIFT_NAME(AdaptiveSpin)
			__API_AVAILABLE(macos(10.15), ios(13.0),
			tvos(13.0), watchos(6.0)) = 0x00040000
);
#if __swift__
#define OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(name) \
		static const os_unfair_lock_options_t \
		name##_FOR_SWIFT OS_SWIFT_NAME(name) = name
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_NONE);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_ADAPTIVE_SPIN);
#undef OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT
#endif
/*!
 * @function os_unfair_lock_lock_with_options
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
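/*
 * Example (illustrative sketch, not part of this SPI): taking a lock that
 * guards a short, self-contained critical section with the data
 * synchronization hint. The names `_stats_lock`, `_stats` and `stats_bump`
 * are hypothetical.
 *
 *   static os_unfair_lock _stats_lock = OS_UNFAIR_LOCK_INIT;
 *   static uint64_t _stats;
 *
 *   void
 *   stats_bump(void)
 *   {
 *       os_unfair_lock_lock_with_options(&_stats_lock,
 *               OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
 *       _stats++;
 *       os_unfair_lock_unlock(&_stats_lock);
 *   }
 */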
/*! @group os_unfair_lock no-TSD interfaces
 *
 * Like the above, but these do not require being on a thread with valid TSD,
 * so they can be called from injected mach-threads. The normal routines use
 * the TSD value for mach_thread_self(); these routines use MACH_PORT_DEAD for
 * the locked value instead. As a result, they will be unable to resolve
 * priority inversions.
 *
 * This should only be used by libpthread.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);

OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);
/*! @group os_unfair_recursive_lock SPI
 *
 * @abstract
 * Similar to os_unfair_lock, but recursive.
 *
 * @discussion
 * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT
 */

#define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
		__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
		__TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		(os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus)
#define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
		(os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
#else
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		{OS_UNFAIR_LOCK_INIT, 0}
#endif // OS_UNFAIR_RECURSIVE_LOCK_INIT
/*!
 * @typedef os_unfair_recursive_lock
 *
 * @abstract
 * Low-level lock that allows waiters to block efficiently on contention.
 *
 * @discussion
 * See os_unfair_lock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
typedef struct os_unfair_recursive_lock_s {
	os_unfair_lock ourl_lock;
	uint32_t ourl_count;
} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;
/*!
 * @function os_unfair_recursive_lock_lock_with_options
 *
 * @abstract
 * See os_unfair_lock_lock_with_options
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);

/*!
 * @function os_unfair_recursive_lock_lock
 *
 * @abstract
 * See os_unfair_lock_lock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
{
	os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
}
/*!
 * @function os_unfair_recursive_lock_trylock
 *
 * @abstract
 * See os_unfair_lock_trylock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

/*!
 * @function os_unfair_recursive_lock_unlock
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
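/*
 * Example (illustrative sketch, not part of this SPI): a function that may be
 * reentered on the same thread, e.g. via a callback, taking the lock
 * recursively. Each lock is balanced by one unlock. The names `_cache_lock`
 * and `cache_update` are hypothetical.
 *
 *   static os_unfair_recursive_lock _cache_lock =
 *           OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *   void
 *   cache_update(bool reentrant)
 *   {
 *       os_unfair_recursive_lock_lock(&_cache_lock);
 *       if (!reentrant) {
 *           cache_update(true); // same thread may relock without deadlocking
 *       }
 *       os_unfair_recursive_lock_unlock(&_cache_lock);
 *   }
 */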
/*!
 * @function os_unfair_recursive_lock_tryunlock4objc
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);

/*!
 * @function os_unfair_recursive_lock_assert_owner
 *
 * @abstract
 * See os_unfair_lock_assert_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
{
	os_unfair_lock_assert_owner(&lock->ourl_lock);
}

/*!
 * @function os_unfair_recursive_lock_assert_not_owner
 *
 * @abstract
 * See os_unfair_lock_assert_not_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
{
	os_unfair_lock_assert_not_owner(&lock->ourl_lock);
}
/*!
 * @function os_unfair_recursive_lock_owned
 *
 * @abstract
 * This function is reserved for the use of people who want to soft-fault
 * when locking models have been violated.
 *
 * @discussion
 * This is meant for SQLite use to detect existing misuse of the API surface,
 * and is not meant for anything other than calling os_log_fault() when such
 * contracts are violated.
 *
 * There's little point in using this value for logic, as the
 * os_unfair_recursive_lock is already recursive anyway.
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock);
/*!
 * @function os_unfair_recursive_lock_unlock_forked_child
 *
 * @abstract
 * Function to be used in an atfork child handler to unlock a recursive unfair
 * lock.
 *
 * @discussion
 * This function helps with handling recursive locks in the presence of fork.
 *
 * It is typical to set up atfork handlers that will:
 * - take the lock in the pre-fork handler,
 * - drop the lock in the parent handler,
 * - reset the lock in the forked child.
 *
 * However, because a recursive lock may have been held by the current thread
 * already, resetting needs to act like an unlock. This function serves that
 * purpose. Unlike os_unfair_recursive_lock_unlock(), this function will fix up
 * the lock ownership to match the new identity of the thread after fork().
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock);
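/*
 * Example (illustrative sketch, not part of this SPI): wiring the three
 * atfork handlers described above with pthread_atfork(3). The names `_g_lock`
 * and the handler functions are hypothetical.
 *
 *   #include <pthread.h>
 *
 *   static os_unfair_recursive_lock _g_lock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *   static void _prepare(void) { os_unfair_recursive_lock_lock(&_g_lock); }
 *   static void _parent(void)  { os_unfair_recursive_lock_unlock(&_g_lock); }
 *   static void _child(void)
 *   {
 *       // reset acts like an unlock, fixing up post-fork thread identity
 *       os_unfair_recursive_lock_unlock_forked_child(&_g_lock);
 *   }
 *
 *   __attribute__((constructor))
 *   static void _register_atfork(void)
 *   {
 *       pthread_atfork(_prepare, _parent, _child);
 *   }
 */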
#if __has_attribute(cleanup)

/*!
 * @function os_unfair_lock_scoped_guard_unlock
 *
 * @abstract
 * Used by os_unfair_lock_lock_scoped_guard
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
{
	os_unfair_lock_unlock(*lock);
}

/*!
 * @function os_unfair_lock_lock_scoped_guard
 *
 * @abstract
 * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
 * automatically called when the enclosing C scope ends.
 *
 * @param name
 * Name for the variable holding the guard.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @see os_unfair_lock_lock
 * @see os_unfair_lock_unlock
 */
#define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
	os_unfair_lock_t \
			__attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
			guard_name = lock; \
	os_unfair_lock_lock(guard_name)

#endif // __has_attribute(cleanup)
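/*
 * Example (illustrative sketch, not part of this SPI): the scoped guard
 * unlocks on every exit path, including early returns. The names `_id_lock`,
 * `_next_id` and `next_id` are hypothetical.
 *
 *   static os_unfair_lock _id_lock = OS_UNFAIR_LOCK_INIT;
 *   static uint64_t _next_id = 1;
 *
 *   uint64_t
 *   next_id(void)
 *   {
 *       os_unfair_lock_lock_scoped_guard(guard, &_id_lock);
 *       return _next_id++; // guard unlocks _id_lock when the scope ends
 *   }
 */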
__END_DECLS

OS_ASSUME_NONNULL_END

/*! @group Inline os_unfair_lock interfaces
 *
 * Inline versions of the os_unfair_lock fastpath.
 *
 * Intended exclusively for special highly performance-sensitive cases where
 * the function calls to the os_unfair_lock API entrypoints add measurable
 * overhead.
 *
 * Do not use in frameworks to implement synchronization API primitives that
 * are exposed to developers; that would lead to false positives for that API
 * from tools such as ThreadSanitizer.
 *
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
 * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 *
 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
 * above and still wish to use these interfaces.
 */
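/*
 * Example (illustrative sketch, not part of this SPI): an OS component opting
 * in to the inline fastpath before including this header. The names
 * `_fast_lock` and `fast_path` are hypothetical.
 *
 *   #define OS_UNFAIR_LOCK_INLINE 1
 *   #include <os/lock_private.h>
 *
 *   static os_unfair_lock _fast_lock = OS_UNFAIR_LOCK_INIT;
 *
 *   void
 *   fast_path(void)
 *   {
 *       os_unfair_lock_lock_inline(&_fast_lock);
 *       // ... extremely short critical section ...
 *       os_unfair_lock_unlock_inline(&_fast_lock);
 *   }
 */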
#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE

#include <pthread/tsd_private.h>

#ifdef __cplusplus
extern "C++" {
#if !(__has_include(<atomic>) && __has_extension(cxx_atomic))
#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
#endif
#include <atomic>
typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) std::_a
__BEGIN_DECLS
#else
#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
#endif
#include <stdatomic.h>
typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) _a
#endif

OS_ASSUME_NONNULL_BEGIN

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_UNLOCKED {0}
#endif
/*!
 * @function os_unfair_lock_lock_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock(lock);
	}
}
/*!
 * @function os_unfair_lock_lock_with_options_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	if (!_pthread_has_direct_tsd()) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
}
/*!
 * @function os_unfair_lock_trylock_inline
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock_inline() instead.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
bool
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire), OSLOCK_STD(memory_order_relaxed));
}
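/*
 * Example (illustrative sketch, not part of this SPI): a valid use of trylock
 * with a useful fallback instead of a retry loop. The names `_log_lock` and
 * `try_flush_log` are hypothetical.
 *
 *   static os_unfair_lock _log_lock = OS_UNFAIR_LOCK_INIT;
 *
 *   void
 *   try_flush_log(void)
 *   {
 *       if (!os_unfair_lock_trylock_inline(&_log_lock)) {
 *           return; // another thread is flushing; skip rather than spin
 *       }
 *       // ... flush buffered log entries ...
 *       os_unfair_lock_unlock_inline(&_log_lock);
 *   }
 */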
/*!
 * @function os_unfair_lock_unlock_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock(lock);
	}
}
/*!
 * @function os_unfair_lock_lock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Locks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	uint32_t mts = MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_no_tsd_4libpthread(lock);
	}
}
/*!
 * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Unlocks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	uint32_t mts = MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock_no_tsd_4libpthread(lock);
	}
}
OS_ASSUME_NONNULL_END

#undef OSLOCK_STD

#ifdef __cplusplus
__END_DECLS
} // extern "C++"
#endif

#endif // OS_UNFAIR_LOCK_INLINE

#endif // __OS_LOCK_PRIVATE__