// objc-lockdebug.mm
  1. /*
  2. * Copyright (c) 2007 Apple Inc. All Rights Reserved.
  3. *
  4. * @APPLE_LICENSE_HEADER_START@
  5. *
  6. * This file contains Original Code and/or Modifications of Original Code
  7. * as defined in and that are subject to the Apple Public Source License
  8. * Version 2.0 (the 'License'). You may not use this file except in
  9. * compliance with the License. Please obtain a copy of the License at
  10. * http://www.opensource.apple.com/apsl/ and read it before using this
  11. * file.
  12. *
  13. * The Original Code and all software distributed under the License are
  14. * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  15. * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  16. * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  18. * Please see the License for the specific language governing rights and
  19. * limitations under the License.
  20. *
  21. * @APPLE_LICENSE_HEADER_END@
  22. */
  23. /***********************************************************************
  24. * objc-lock.m
  25. * Error-checking locks for debugging.
  26. **********************************************************************/
  27. #include "objc-private.h"
  28. #if LOCKDEBUG && !TARGET_OS_WIN32
  29. #include <unordered_map>
  30. /***********************************************************************
  31. * Thread-local bool set during _objc_atfork_prepare().
  32. * That function is allowed to break some lock ordering rules.
  33. **********************************************************************/
// TLS slot recording whether this thread is inside _objc_atfork_prepare().
static tls_key_t fork_prepare_tls;

// Record that the current thread is entering (true) or leaving (false)
// _objc_atfork_prepare(). Lazily creates the TLS key on first use.
void
lockdebug_setInForkPrepare(bool inForkPrepare)
{
    INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
    tls_set(fork_prepare_tls, (void*)inForkPrepare);
}
// Returns true if the calling thread is currently inside
// _objc_atfork_prepare() (see lockdebug_setInForkPrepare above).
static bool
inForkPrepare()
{
    INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
    return (bool)tls_get(fork_prepare_tls);
}
  47. /***********************************************************************
  48. * Lock order graph.
  49. * "lock X precedes lock Y" means that X must be acquired first.
  50. * This property is transitive.
  51. **********************************************************************/
// Node in the lock order graph; one node per declared lock address.
struct lockorder {
    const void *l;  // address of the lock this node describes

    // Locks that must be acquired before this one (direct edges only;
    // transitivity is handled by lockPrecedesLock()).
    std::vector<const lockorder *> predecessors;

    // Cache of lockPrecedesLock() answers for this node.
    // NOTE(review): cached entries are never invalidated when new edges
    // are added; presumably all ordering declarations happen before any
    // queries — confirm against callers.
    mutable std::unordered_map<const lockorder *, bool> memo;

    lockorder(const void *newl) : l(newl) { }
};

// All known lock order nodes, keyed by lock address.
static std::unordered_map<const void*, lockorder *> lockOrderList;
// not mutex_t because we don't want lock debugging on this lock
static mutex_tt<false> lockOrderLock;
  61. static bool
  62. lockPrecedesLock(const lockorder *oldlock, const lockorder *newlock)
  63. {
  64. auto memoed = newlock->memo.find(oldlock);
  65. if (memoed != newlock->memo.end()) {
  66. return memoed->second;
  67. }
  68. bool result = false;
  69. for (const auto *pre : newlock->predecessors) {
  70. if (oldlock == pre || lockPrecedesLock(oldlock, pre)) {
  71. result = true;
  72. break;
  73. }
  74. }
  75. newlock->memo[oldlock] = result;
  76. return result;
  77. }
  78. static bool
  79. lockPrecedesLock(const void *oldlock, const void *newlock)
  80. {
  81. mutex_tt<false>::locker lock(lockOrderLock);
  82. auto oldorder = lockOrderList.find(oldlock);
  83. auto neworder = lockOrderList.find(newlock);
  84. if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
  85. return false;
  86. }
  87. return lockPrecedesLock(oldorder->second, neworder->second);
  88. }
  89. static bool
  90. lockUnorderedWithLock(const void *oldlock, const void *newlock)
  91. {
  92. mutex_tt<false>::locker lock(lockOrderLock);
  93. auto oldorder = lockOrderList.find(oldlock);
  94. auto neworder = lockOrderList.find(newlock);
  95. if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
  96. return true;
  97. }
  98. if (lockPrecedesLock(oldorder->second, neworder->second) ||
  99. lockPrecedesLock(neworder->second, oldorder->second))
  100. {
  101. return false;
  102. }
  103. return true;
  104. }
  105. void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock)
  106. {
  107. if (lockPrecedesLock(newlock, oldlock)) {
  108. _objc_fatal("contradiction in lock order declaration");
  109. }
  110. mutex_tt<false>::locker lock(lockOrderLock);
  111. auto oldorder = lockOrderList.find(oldlock);
  112. auto neworder = lockOrderList.find(newlock);
  113. if (oldorder == lockOrderList.end()) {
  114. lockOrderList[oldlock] = new lockorder(oldlock);
  115. oldorder = lockOrderList.find(oldlock);
  116. }
  117. if (neworder == lockOrderList.end()) {
  118. lockOrderList[newlock] = new lockorder(newlock);
  119. neworder = lockOrderList.find(newlock);
  120. }
  121. neworder->second->predecessors.push_back(oldorder->second);
  122. }
  123. /***********************************************************************
  124. * Recording - per-thread list of mutexes and monitors held
  125. **********************************************************************/
// The kinds of lock tracked by the per-thread recorder.
enum class lockkind {
    MUTEX = 1, MONITOR = 2, RDLOCK = 3, WRLOCK = 4, RECURSIVE = 5
};

// Shorthand so call sites below can say MUTEX instead of lockkind::MUTEX.
#define MUTEX     lockkind::MUTEX
#define MONITOR   lockkind::MONITOR
#define RDLOCK    lockkind::RDLOCK
#define WRLOCK    lockkind::WRLOCK
#define RECURSIVE lockkind::RECURSIVE

// One tracked lock: its kind and how many times it is nested.
struct lockcount {
    lockkind k;  // the kind of lock it is (MUTEX, MONITOR, etc)
    int i;       // the lock's nest count
};

// A set of locks, keyed by lock address.
using objc_lock_list = std::unordered_map<const void *, lockcount>;

// Thread-local list of locks owned by a thread.
// Used by lock ownership checks.
static tls_key_t lock_tls;
// Global list of all locks.
// Used by fork() safety check.
// This can't be a static struct because of C++ initialization order problems.
static objc_lock_list& AllLocks() {
    static objc_lock_list *locks;
    // Thread-safe lazy creation; the map is never destroyed.
    INIT_ONCE_PTR(locks, new objc_lock_list, (void)0);
    return *locks;
}
  150. static void
  151. destroyLocks(void *value)
  152. {
  153. auto locks = (objc_lock_list *)value;
  154. // fixme complain about any still-held locks?
  155. if (locks) delete locks;
  156. }
// Returns the calling thread's list of owned locks, creating it on
// first use. The list is freed by destroyLocks() at thread exit.
static objc_lock_list&
ownedLocks()
{
    // Use a dedicated tls key to prevent differences vs non-debug in
    // usage of objc's other tls keys (required for some unit tests).
    INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);

    auto locks = (objc_lock_list *)tls_get(lock_tls);
    if (!locks) {
        locks = new objc_lock_list;
        tls_set(lock_tls, locks);
    }
    return *locks;
}
  170. static bool
  171. hasLock(objc_lock_list& locks, const void *lock, lockkind kind)
  172. {
  173. auto iter = locks.find(lock);
  174. if (iter != locks.end() && iter->second.k == kind) return true;
  175. return false;
  176. }
  177. static const char *sym(const void *lock)
  178. {
  179. Dl_info info;
  180. int ok = dladdr(lock, &info);
  181. if (ok && info.dli_sname && info.dli_sname[0]) return info.dli_sname;
  182. else return "??";
  183. }
// Record the acquisition of `lock` (of the given kind) in `locks`,
// verifying lock ordering for newly-acquired locks. Aborts the process
// on an ordering violation. Re-acquisition only bumps the nest count.
static void
setLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
    // Check if we already own this lock.
    auto iter = locks.find(lock);
    if (iter != locks.end() && iter->second.k == kind) {
        iter->second.i++;
        return;
    }

    // Newly-acquired lock. Verify lock ordering.
    // Locks not in AllLocks are exempt (i.e. @synchronize locks)
    // (When `locks` IS AllLocks() we are merely registering the lock's
    // existence, so no ordering check applies.)
    if (&locks != &AllLocks() && AllLocks().find(lock) != AllLocks().end()) {
        for (auto& oldlock : locks) {
            if (AllLocks().find(oldlock.first) == AllLocks().end()) {
                // oldlock is exempt
                continue;
            }
            // A lock may never be taken after a lock it is declared
            // to precede.
            if (lockPrecedesLock(lock, oldlock.first)) {
                _objc_fatal("lock %p (%s) incorrectly acquired before %p (%s)",
                            oldlock.first, sym(oldlock.first), lock, sym(lock));
            }
            if (!inForkPrepare() &&
                lockUnorderedWithLock(lock, oldlock.first))
            {
                // _objc_atfork_prepare is allowed to acquire
                // otherwise-unordered locks, but nothing else may.
                _objc_fatal("lock %p (%s) acquired before %p (%s) "
                            "with no defined lock order",
                            oldlock.first, sym(oldlock.first), lock, sym(lock));
            }
        }
    }

    locks[lock] = lockcount{kind, 1};
}
  218. static void
  219. clearLock(objc_lock_list& locks, const void *lock, lockkind kind)
  220. {
  221. auto iter = locks.find(lock);
  222. if (iter != locks.end()) {
  223. auto& l = iter->second;
  224. if (l.k == kind) {
  225. if (--l.i == 0) {
  226. locks.erase(iter);
  227. }
  228. return;
  229. }
  230. }
  231. _objc_fatal("lock not found!");
  232. }
  233. /***********************************************************************
  234. * fork() safety checking
  235. **********************************************************************/
// Register a mutex with the global AllLocks() list so the fork()
// safety checks below know it exists.
void
lockdebug_remember_mutex(mutex_t *lock)
{
    setLock(AllLocks(), lock, MUTEX);
}

// Register a recursive mutex with the global AllLocks() list.
void
lockdebug_remember_recursive_mutex(recursive_mutex_t *lock)
{
    setLock(AllLocks(), lock, RECURSIVE);
}

// Register a monitor with the global AllLocks() list.
void
lockdebug_remember_monitor(monitor_t *lock)
{
    setLock(AllLocks(), lock, MONITOR);
}
// Abort unless the calling thread owns every lock registered in
// AllLocks(). Part of the fork() safety checks.
void
lockdebug_assert_all_locks_locked()
{
    auto& owned = ownedLocks();

    for (const auto& l : AllLocks()) {
        if (!hasLock(owned, l.first, l.second.k)) {
            _objc_fatal("lock %p:%d is incorrectly not owned",
                        l.first, l.second.k);
        }
    }
}
  262. void
  263. lockdebug_assert_no_locks_locked()
  264. {
  265. auto& owned = ownedLocks();
  266. for (const auto& l : AllLocks()) {
  267. if (hasLock(owned, l.first, l.second.k)) {
  268. _objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k);
  269. }
  270. }
  271. }
  272. /***********************************************************************
  273. * Mutex checking
  274. **********************************************************************/
// Record acquisition of `lock` by this thread.
// Aborts on self-deadlock (relocking a mutex already held).
void
lockdebug_mutex_lock(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("deadlock: relocking mutex");
    }
    setLock(locks, lock, MUTEX);
}
// try-lock success is the only case with lockdebug effects.
// try-lock when already locked is OK (will fail)
// try-lock failure does nothing.
void
lockdebug_mutex_try_lock_success(mutex_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, MUTEX);
}
  293. void
  294. lockdebug_mutex_unlock(mutex_t *lock)
  295. {
  296. auto& locks = ownedLocks();
  297. if (!hasLock(locks, lock, MUTEX)) {
  298. _objc_fatal("unlocking unowned mutex");
  299. }
  300. clearLock(locks, lock, MUTEX);
  301. }
// Abort unless the calling thread currently holds `lock`.
void
lockdebug_mutex_assert_locked(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly not locked");
    }
}

// Abort if the calling thread currently holds `lock`.
void
lockdebug_mutex_assert_unlocked(mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MUTEX)) {
        _objc_fatal("mutex incorrectly locked");
    }
}
  318. /***********************************************************************
  319. * Recursive mutex checking
  320. **********************************************************************/
// Record acquisition of a recursive mutex. Unlike plain mutexes,
// relocking is legal here; setLock() bumps the nest count.
void
lockdebug_recursive_mutex_lock(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();
    setLock(locks, lock, RECURSIVE);
}

// Record release of a recursive mutex; aborts if it was not held.
void
lockdebug_recursive_mutex_unlock(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("unlocking unowned recursive mutex");
    }
    clearLock(locks, lock, RECURSIVE);
}
// Abort unless the calling thread currently holds `lock`.
void
lockdebug_recursive_mutex_assert_locked(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly not locked");
    }
}

// Abort if the calling thread currently holds `lock`.
void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, RECURSIVE)) {
        _objc_fatal("recursive mutex incorrectly locked");
    }
}
  352. /***********************************************************************
  353. * Monitor checking
  354. **********************************************************************/
  355. void
  356. lockdebug_monitor_enter(monitor_t *lock)
  357. {
  358. auto& locks = ownedLocks();
  359. if (hasLock(locks, lock, MONITOR)) {
  360. _objc_fatal("deadlock: relocking monitor");
  361. }
  362. setLock(locks, lock, MONITOR);
  363. }
// Record exit from `lock`; aborts if it was not held by this thread.
void
lockdebug_monitor_leave(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("unlocking unowned monitor");
    }
    clearLock(locks, lock, MONITOR);
}

// Abort unless this thread holds the monitor it is about to wait on.
// The ownership record is intentionally left unchanged across the wait.
void
lockdebug_monitor_wait(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("waiting in unowned monitor");
    }
}
// Abort unless the calling thread currently holds `lock`.
void
lockdebug_monitor_assert_locked(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (!hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly not locked");
    }
}

// Abort if the calling thread currently holds `lock`.
void
lockdebug_monitor_assert_unlocked(monitor_t *lock)
{
    auto& locks = ownedLocks();

    if (hasLock(locks, lock, MONITOR)) {
        _objc_fatal("monitor incorrectly held");
    }
}
  397. #endif