  1. /*
  2. * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
  3. *
  4. * @APPLE_APACHE_LICENSE_HEADER_START@
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. *
  18. * @APPLE_APACHE_LICENSE_HEADER_END@
  19. */
  20. #ifndef __OS_LOCK_PRIVATE__
  21. #define __OS_LOCK_PRIVATE__
  22. #include <Availability.h>
  23. #include <TargetConditionals.h>
  24. #include <sys/cdefs.h>
  25. #include <stddef.h>
  26. #include <stdint.h>
  27. #include <stdbool.h>
  28. #include <os/base_private.h>
  29. #include <os/lock.h>
  30. OS_ASSUME_NONNULL_BEGIN
  31. /*! @header
  32. * Low-level lock SPI
  33. */
  34. #define OS_LOCK_SPI_VERSION 20171006
  35. /*!
  36. * @typedef os_lock_t
  37. *
  38. * @abstract
  39. * Pointer to one of the os_lock variants.
  40. */
  41. #define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
  42. #define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
  43. #define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)
  44. #define OS_LOCK(type) os_lock_##type##_s
  45. #define OS_LOCK_STRUCT(type) struct OS_LOCK(type)
  46. #if defined(__cplusplus) && __cplusplus >= 201103L
  47. #define OS_LOCK_DECL(type, size) \
  48. typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
  49. private: \
  50. OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
  51. uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
  52. public: \
  53. constexpr OS_LOCK(type)() : \
  54. osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
  55. } OS_LOCK(type)
  56. #define OS_LOCK_INIT(type) {}
  57. typedef OS_LOCK_STRUCT(base) {
  58. protected:
  59. constexpr OS_LOCK(base)() {}
  60. } *os_lock_t;
  61. #else
  62. #define OS_LOCK_DECL(type, size) \
  63. typedef OS_LOCK_STRUCT(type) { \
  64. OS_LOCK_TYPE_STRUCT(type) * osl_type; \
  65. uintptr_t _osl_##type##_opaque[size-1]; \
  66. } OS_LOCK(type)
  67. #define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }
  68. #ifndef OS_LOCK_T_MEMBER
  69. #define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
  70. #endif
  71. typedef OS_TRANSPARENT_UNION union {
  72. OS_LOCK_T_MEMBER(base);
  73. OS_LOCK_T_MEMBER(unfair);
  74. OS_LOCK_T_MEMBER(nospin);
  75. OS_LOCK_T_MEMBER(spin);
  76. OS_LOCK_T_MEMBER(handoff);
  77. OS_LOCK_T_MEMBER(eliding);
  78. OS_LOCK_T_MEMBER(transactional);
  79. } os_lock_t;
  80. #endif
  81. /*!
  82. * @typedef os_lock_unfair_s
  83. *
  84. * @abstract
  85. * os_lock variant equivalent to os_unfair_lock. Does not spin on contention but
  86. * waits in the kernel to be woken up by an unlock. The lock value contains
  87. * ownership information that the system may use to attempt to resolve priority
  88. * inversions.
  89. *
  90. * @discussion
  91. * Intended as a replacement for os_lock_spin_s or OSSpinLock. Like with
  92. * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
  93. * can potentially immediately reacquire the lock before a woken up waiter gets
  94. * an opportunity to attempt to acquire the lock, so starvation is possible.
  95. *
  96. * Must be initialized with OS_LOCK_UNFAIR_INIT
  97. */
  98. __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
  99. __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
  100. OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
  101. OS_LOCK_DECL(unfair, 2);
  102. #define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)
  103. /*!
  104. * @typedef os_lock_nospin_s
  105. *
  106. * @abstract
  107. * os_lock variant that does not spin on contention but waits in the kernel to
  108. * be woken up by an unlock. No attempt to resolve priority inversions is made
  109. * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
  110. *
  111. * @discussion
  112. * Intended as a replacement for os_lock_spin_s or OSSpinLock. Like with
  113. * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
  114. * can potentially immediately reacquire the lock before a woken up waiter gets
  115. * an opportunity to attempt to acquire the lock, so starvation is possible.
  116. *
  117. * Must be initialized with OS_LOCK_NOSPIN_INIT
  118. */
  119. __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
  120. __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
  121. OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
  122. OS_LOCK_DECL(nospin, 2);
  123. #define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)
  124. /*!
  125. * @typedef os_lock_spin_s
  126. *
  127. * @abstract
  128. * Deprecated os_lock variant that on contention starts by spinning trying to
  129. * acquire the lock, then depressing the priority of the current thread and
  130. * finally blocking the thread waiting for the lock to become available.
  131. * Equivalent to OSSpinLock and equally not recommended, see discussion in
  132. * libkern/OSAtomic.h headerdoc.
  133. *
  134. * @discussion
  135. * Spinlocks are intended to be held only for very brief periods of time. The
  136. * critical section must not make syscalls and should avoid touching areas of
  137. * memory that may trigger a page fault, in particular if the critical section
  138. * may be executing on threads of widely differing priorities or on a mix of
  139. * IO-throttled and unthrottled threads.
  140. *
  141. * Must be initialized with OS_LOCK_SPIN_INIT
  142. */
  143. __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
  144. OS_EXPORT OS_LOCK_TYPE_DECL(spin);
  145. OS_LOCK_DECL(spin, 2);
  146. #define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)
  147. /*!
  148. * @typedef os_lock_handoff_s
  149. *
  150. * @abstract
  151. * os_lock variant that on contention hands off the current kernel thread to the
  152. * lock-owning userspace thread (if it is not running), temporarily overriding
  153. * its priority and IO throttle if necessary.
  154. *
  155. * @discussion
  156. * Intended for use in limited circumstances where the critical section might
  157. * be executing on threads of widely differing priorities or on a mix of
  158. * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
  159. * be likely to encounter a priority inversion.
  160. *
  161. * IMPORTANT: This lock variant is NOT intended as a general replacement for all
  162. * uses of os_lock_spin_s or OSSpinLock.
  163. *
  164. * Must be initialized with OS_LOCK_HANDOFF_INIT
  165. */
  166. __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
  167. OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
  168. OS_LOCK_DECL(handoff, 2);
  169. #define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)
  170. #if !TARGET_OS_IPHONE
  171. /*!
  172. * @typedef os_lock_eliding_s
  173. *
  174. * @abstract
  175. * os_lock variant that uses hardware lock elision support if available to allow
  176. * multiple processors to concurrently execute a critical section as long as
  177. * they don't perform conflicting operations on each other's data. In case of
  178. * conflict, the lock reverts to exclusive operation and os_lock_spin_s behavior
  179. * on contention (at potential extra cost for the aborted attempt at lock-elided
  180. * concurrent execution). If hardware HLE support is not present, this lock
  181. * variant behaves like os_lock_spin_s.
  182. *
  183. * @discussion
  184. * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
  185. * with HLE support to ensure the data access pattern and length of the critical
  186. * section allows lock-elided execution to succeed frequently enough to offset
  187. * the cost of any aborted concurrent execution.
  188. *
  189. * Must be initialized with OS_LOCK_ELIDING_INIT
  190. */
  191. __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
  192. OS_EXPORT OS_LOCK_TYPE_DECL(eliding);
  193. OS_LOCK_DECL(eliding, 8) OS_ALIGNED(64);
  194. #define OS_LOCK_ELIDING_INIT OS_LOCK_INIT(eliding)
  195. /*!
  196. * @typedef os_lock_transactional_s
  197. *
  198. * @abstract
  199. * os_lock variant that uses hardware restricted transactional memory support if
  200. * available to allow multiple processors to concurrently execute the critical
  201. * section as a transactional region. If transactional execution aborts, the
  202. * lock reverts to exclusive operation and os_lock_spin_s behavior on contention
  203. * (at potential extra cost for the aborted attempt at transactional concurrent
  204. * execution). If hardware RTM support is not present, this lock variant behaves
  205. * like os_lock_eliding_s.
  206. *
  207. * @discussion
  208. * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
  209. * with RTM support to ensure the data access pattern and length of the critical
  210. * section allows transactional execution to succeed frequently enough to offset
  211. * the cost of any aborted transactions.
  212. *
  213. * Must be initialized with OS_LOCK_TRANSACTIONAL_INIT
  214. */
  215. __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
  216. OS_EXPORT OS_LOCK_TYPE_DECL(transactional);
  217. OS_LOCK_DECL(transactional, 8) OS_ALIGNED(64);
  218. #define OS_LOCK_TRANSACTIONAL_INIT OS_LOCK_INIT(transactional)
  219. #endif
  220. __BEGIN_DECLS
  221. /*!
  222. * @function os_lock_lock
  223. *
  224. * @abstract
  225. * Locks an os_lock variant.
  226. *
  227. * @param lock
  228. * Pointer to one of the os_lock variants.
  229. */
  230. __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
  231. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  232. void os_lock_lock(os_lock_t lock);
  233. /*!
  234. * @function os_lock_trylock
  235. *
  236. * @abstract
  237. * Locks an os_lock variant if it is not already locked.
  238. *
  239. * @param lock
  240. * Pointer to one of the os_lock variants.
  241. *
  242. * @result
  243. * Returns true if the lock was successfully locked and false if the lock was
  244. * already locked.
  245. */
  246. __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
  247. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  248. bool os_lock_trylock(os_lock_t lock);
  249. /*!
  250. * @function os_lock_unlock
  251. *
  252. * @abstract
  253. * Unlocks an os_lock variant.
  254. *
  255. * @param lock
  256. * Pointer to one of the os_lock variants.
  257. */
  258. __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
  259. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  260. void os_lock_unlock(os_lock_t lock);
  261. /*! @group os_unfair_lock SPI
  262. *
  263. * @abstract
  264. * Replacement for the deprecated OSSpinLock. Does not spin on contention but
  265. * waits in the kernel to be woken up by an unlock. The opaque lock value
  266. * contains thread ownership information that the system may use to attempt to
  267. * resolve priority inversions.
  268. *
  269. * This lock must be unlocked from the same thread that locked it, attempts to
  270. * unlock from a different thread will cause an assertion aborting the process.
  271. *
  272. * This lock must not be accessed from multiple processes or threads via shared
  273. * or multiply-mapped memory, the lock implementation relies on the address of
  274. * the lock value and owning process.
  275. *
  276. * @discussion
  277. * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
  278. * unlocker can potentially immediately reacquire the lock before a woken up
  279. * waiter gets an opportunity to attempt to acquire the lock. This may be
  280. * advantageous for performance reasons, but also makes starvation of waiters a
  281. * possibility.
  282. *
  283. * Must be initialized with OS_UNFAIR_LOCK_INIT
  284. */
  285. /*!
  286. * @typedef os_unfair_lock_options_t
  287. *
  288. * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
  289. * This flag informs the runtime that the specified lock is used for data
  290. * synchronization and that the lock owner is always able to make progress
  291. * toward releasing the lock without the help of another thread in the same
  292. * process. This hint will cause the workqueue subsystem to not create new
  293. * threads to offset for threads waiting for the lock.
  294. *
  295. * When this flag is used, the code running under the critical section should
  296. * be well known and under your control (Generally it should not call into
  297. * framework code).
  298. */
  299. OS_ENUM(os_unfair_lock_options, uint32_t,
  300. OS_UNFAIR_LOCK_NONE
  301. OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
  302. OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
  303. OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
  304. );
  305. /*!
  306. * @function os_unfair_lock_lock_with_options
  307. *
  308. * @abstract
  309. * Locks an os_unfair_lock.
  310. *
  311. * @param lock
  312. * Pointer to an os_unfair_lock.
  313. *
  314. * @param options
  315. * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
  316. */
  317. OS_UNFAIR_LOCK_AVAILABILITY
  318. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  319. void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
  320. os_unfair_lock_options_t options);
  321. /*! @group os_unfair_lock no-TSD interfaces
  322. *
  323. * Like the above, but don't require being on a thread with valid TSD, so they
  324. * can be called from injected mach-threads. The normal routines use the TSD
  325. * value for mach_thread_self(), these routines use MACH_PORT_DEAD for the
  326. * locked value instead. As a result, they will be unable to resolve priority
  327. * inversions.
  328. *
  329. * This should only be used by libpthread.
  330. *
  331. */
  332. OS_UNFAIR_LOCK_AVAILABILITY
  333. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  334. void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);
  335. OS_UNFAIR_LOCK_AVAILABILITY
  336. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  337. void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);
  338. /*! @group os_unfair_recursive_lock SPI
  339. *
  340. * @abstract
  341. * Similar to os_unfair_lock, but recursive.
  342. *
  343. * @discussion
  344. * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT
  345. */
  346. #define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
  347. __OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
  348. __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
  349. #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
  350. #define OS_UNFAIR_RECURSIVE_LOCK_INIT \
  351. ((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
  352. #elif defined(__cplusplus) && __cplusplus >= 201103L
  353. #define OS_UNFAIR_RECURSIVE_LOCK_INIT \
  354. (os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
  355. #elif defined(__cplusplus)
  356. #define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
  357. (os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
  358. #else
  359. #define OS_UNFAIR_RECURSIVE_LOCK_INIT \
  360. {OS_UNFAIR_LOCK_INIT, 0}
  361. #endif // OS_UNFAIR_RECURSIVE_LOCK_INIT
  362. /*!
  363. * @typedef os_unfair_recursive_lock
  364. *
  365. * @abstract
  366. * Low-level lock that allows waiters to block efficiently on contention.
  367. *
  368. * @discussion
  369. * See os_unfair_lock.
  370. *
  371. */
  372. OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
  373. typedef struct os_unfair_recursive_lock_s {
  374. os_unfair_lock ourl_lock;
  375. uint32_t ourl_count;
  376. } os_unfair_recursive_lock, *os_unfair_recursive_lock_t;
  377. /*!
  378. * @function os_unfair_recursive_lock_lock_with_options
  379. *
  380. * @abstract
  381. * See os_unfair_lock_lock_with_options
  382. */
  383. OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
  384. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  385. void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
  386. os_unfair_lock_options_t options);
  387. /*!
  388. * @function os_unfair_recursive_lock_lock
  389. *
  390. * @abstract
  391. * See os_unfair_lock_lock
  392. */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
{
	// Convenience wrapper: lock with no option flags (default behavior).
	os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
}
  400. /*!
  401. * @function os_unfair_recursive_lock_trylock
  402. *
  403. * @abstract
  404. * See os_unfair_lock_trylock
  405. */
  406. OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
  407. OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
  408. bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);
  409. /*!
  410. * @function os_unfair_recursive_lock_unlock
  411. *
  412. * @abstract
  413. * See os_unfair_lock_unlock
  414. */
  415. OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
  416. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  417. void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
  418. /*!
  419. * @function os_unfair_recursive_lock_tryunlock4objc
  420. *
  421. * @abstract
  422. * See os_unfair_lock_unlock
  423. */
  424. OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
  425. OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
  426. bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
  427. /*!
  428. * @function os_unfair_recursive_lock_assert_owner
  429. *
  430. * @abstract
  431. * See os_unfair_lock_assert_owner
  432. */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
{
	// Delegates to the underlying os_unfair_lock's owner assertion on the
	// embedded ourl_lock; the recursion count itself is not inspected.
	os_unfair_lock_assert_owner(&lock->ourl_lock);
}
  440. /*!
  441. * @function os_unfair_recursive_lock_assert_not_owner
  442. *
  443. * @abstract
  444. * See os_unfair_lock_assert_not_owner
  445. */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
{
	// Delegates to the underlying os_unfair_lock's not-owner assertion on
	// the embedded ourl_lock.
	os_unfair_lock_assert_not_owner(&lock->ourl_lock);
}
  453. #if __has_attribute(cleanup)
  454. /*!
  455. * @function os_unfair_lock_scoped_guard_unlock
  456. *
  457. * @abstract
  458. * Used by os_unfair_lock_lock_scoped_guard
  459. */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
{
	// __attribute__((cleanup)) callback: it receives a pointer to the guard
	// variable, so dereference once to reach the lock to unlock.
	os_unfair_lock_unlock(*lock);
}
  467. /*!
  468. * @function os_unfair_lock_lock_scoped_guard
  469. *
  470. * @abstract
  471. * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
  472. * automatically called when the enclosing C scope ends.
  473. *
  474. * @param name
  475. * Name for the variable holding the guard.
  476. *
  477. * @param lock
  478. * Pointer to an os_unfair_lock.
  479. *
  480. * @see os_unfair_lock_lock
  481. * @see os_unfair_lock_unlock
  482. */
  483. #define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
  484. os_unfair_lock_t \
  485. __attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
  486. guard_name = lock; \
  487. os_unfair_lock_lock(guard_name)
  488. #endif // __has_attribute(cleanup)
  489. __END_DECLS
  490. OS_ASSUME_NONNULL_END
  491. /*! @group Inline os_unfair_lock interfaces
  492. *
  493. * Inline versions of the os_unfair_lock fastpath.
  494. *
  495. * Intended exclusively for special highly performance-sensitive cases where the
  496. * function calls to the os_unfair_lock API entrypoints add measurable overhead.
  497. *
  498. * Do not use in frameworks to implement synchronization API primitives that are
  499. * exposed to developers, that would lead to false positives for that API from
  500. * tools such as ThreadSanitizer.
  501. *
  502. * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
  503. * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
  504. * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
  505. * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
  506. *
  507. * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
  508. * above and still wish to use these interfaces.
  509. */
  510. #if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE
  511. #include <pthread/tsd_private.h>
  512. #ifdef __cplusplus
  513. extern "C++" {
  514. #if !(__has_include(<atomic>) && __has_extension(cxx_atomic))
  515. #error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
  516. #endif
  517. #include <atomic>
  518. typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
  519. #define OSLOCK_STD(_a) std::_a
  520. __BEGIN_DECLS
  521. #else
  522. #if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
  523. #error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
  524. #endif
  525. #include <stdatomic.h>
  526. typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
  527. #define OSLOCK_STD(_a) _a
  528. #endif
  529. OS_ASSUME_NONNULL_BEGIN
  530. #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
  531. #define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
  532. #elif defined(__cplusplus) && __cplusplus >= 201103L
  533. #define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
  534. #elif defined(__cplusplus)
  535. #define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
  536. #else
  537. #define OS_UNFAIR_LOCK_UNLOCKED {0}
  538. #endif
  539. /*!
  540. * @function os_unfair_lock_lock_inline
  541. *
  542. * @abstract
  543. * Locks an os_unfair_lock.
  544. *
  545. * @param lock
  546. * Pointer to an os_unfair_lock.
  547. */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
	// Without direct TSD the thread's mach port cannot be read cheaply;
	// take the library slow path immediately.
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
	// The locked value is this thread's mach port, read from the TSD slot;
	// per the group headerdoc this ownership info lets the system attempt
	// priority-inversion resolution.
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	// Fast path: one strong CAS from unlocked to owned (acquire on
	// success, relaxed on failure). On contention, fall back to the full
	// implementation, which can wait in the kernel.
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock(lock);
	}
}
  564. /*!
  565. * @function os_unfair_lock_lock_with_options_inline
  566. *
  567. * @abstract
  568. * Locks an os_unfair_lock.
  569. *
  570. * @param lock
  571. * Pointer to an os_unfair_lock.
  572. *
  573. * @param options
  574. * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
  575. */
  576. OS_UNFAIR_LOCK_AVAILABILITY
  577. OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
  578. void
  579. os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
  580. os_unfair_lock_options_t options)
  581. {
  582. if (!_pthread_has_direct_tsd()) {
  583. return os_unfair_lock_lock_with_options(lock, options);
  584. }
  585. uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
  586. _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
  587. os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
  588. if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
  589. (_os_atomic_unfair_lock*)lock, &unlocked, locked,
  590. OSLOCK_STD(memory_order_acquire),
  591. OSLOCK_STD(memory_order_relaxed))) {
  592. return os_unfair_lock_lock_with_options(lock, options);
  593. }
  594. }
  595. /*!
  596. * @function os_unfair_lock_trylock_inline
  597. *
  598. * @abstract
  599. * Locks an os_unfair_lock if it is not already locked.
  600. *
  601. * @discussion
  602. * It is invalid to surround this function with a retry loop, if this function
  603. * returns false, the program must be able to proceed without having acquired
  604. * the lock, or it must call os_unfair_lock_lock_inline() instead.
  605. *
  606. * @param lock
  607. * Pointer to an os_unfair_lock.
  608. *
  609. * @result
  610. * Returns true if the lock was successfully locked and false if the lock was
  611. * already locked.
  612. */
  613. OS_UNFAIR_LOCK_AVAILABILITY
  614. OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
  615. bool
  616. os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
  617. {
  618. if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
  619. uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
  620. _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
  621. os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
  622. return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
  623. (_os_atomic_unfair_lock*)lock, &unlocked, locked,
  624. OSLOCK_STD(memory_order_acquire), OSLOCK_STD(memory_order_relaxed));
  625. }
  626. /*!
  627. * @function os_unfair_lock_unlock_inline
  628. *
  629. * @abstract
  630. * Unlocks an os_unfair_lock.
  631. *
  632. * @param lock
  633. * Pointer to an os_unfair_lock.
  634. */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
	// Without direct TSD, take the library slow path immediately.
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	// Fast path: release-CAS from "owned by this thread's port" back to
	// unlocked. If the lock word does not match that uncontended owner
	// value, defer to the library implementation (which can e.g. wake
	// kernel waiters -- NOTE(review): presumed; not visible from here).
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock(lock);
	}
}
  651. /*!
  652. * @function os_unfair_lock_lock_inline_no_tsd_4libpthread
  653. *
  654. * @abstract
  655. * Locks an os_unfair_lock, without requiring valid TSD.
  656. *
  657. * This should only be used by libpthread.
  658. *
  659. * @param lock
  660. * Pointer to an os_unfair_lock.
  661. */
  662. OS_UNFAIR_LOCK_AVAILABILITY
  663. OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
  664. void
  665. os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
  666. {
  667. uint32_t mts = MACH_PORT_DEAD;
  668. os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
  669. if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
  670. (_os_atomic_unfair_lock*)lock, &unlocked, locked,
  671. OSLOCK_STD(memory_order_acquire),
  672. OSLOCK_STD(memory_order_relaxed))) {
  673. return os_unfair_lock_lock_no_tsd_4libpthread(lock);
  674. }
  675. }
  676. /*!
  677. * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread
  678. *
  679. * @abstract
  680. * Unlocks an os_unfair_lock, without requiring valid TSD.
  681. *
  682. * This should only be used by libpthread.
  683. *
  684. * @param lock
  685. * Pointer to an os_unfair_lock.
  686. */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	// No-TSD variant: the locked value is the MACH_PORT_DEAD sentinel
	// rather than the owning thread's mach port (see group headerdoc).
	uint32_t mts = MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	// Release-CAS back to unlocked; on mismatch defer to the library's
	// no-TSD unlock path.
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock_no_tsd_4libpthread(lock);
	}
}
  701. OS_ASSUME_NONNULL_END
  702. #undef OSLOCK_STD
  703. #ifdef __cplusplus
  704. __END_DECLS
  705. } // extern "C++"
  706. #endif
  707. #endif // OS_UNFAIR_LOCK_INLINE
  708. #endif // __OS_LOCK_PRIVATE__