debugobjects.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <[email protected]>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #define pr_fmt(fmt) "ODEBUG: " fmt
  11. #include <linux/debugobjects.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/sched.h>
  14. #include <linux/seq_file.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/slab.h>
  17. #include <linux/hash.h>
  18. #define ODEBUG_HASH_BITS 14
  19. #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
  20. #define ODEBUG_POOL_SIZE 1024
  21. #define ODEBUG_POOL_MIN_LEVEL 256
  22. #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
  23. #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
  24. #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
/*
 * One hash bucket: the list of tracked objects that hash to this slot,
 * plus the raw spinlock protecting that list.
 */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
/* Hash table of buckets, indexed by hashing the tracked object's address. */
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static pool used before the slab cache (obj_cache) is available. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Free list of debug_obj instances. */
static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;	/* statistics: lowest fill level seen */
static int			obj_pool_free = ODEBUG_POOL_SIZE;	/* current number of free pool objects */
static int			obj_pool_used;				/* objects currently handed out */
static int			obj_pool_max_used;			/* statistics: peak usage */
static struct kmem_cache	*obj_cache;				/* slab cache; NULL during early boot */

static int			debug_objects_maxchain __read_mostly;	/* longest hash chain walked */
static int			debug_objects_fixups __read_mostly;	/* successful fixup invocations */
static int			debug_objects_warnings __read_mostly;	/* state violation warnings emitted */
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
/* Set only while the selftest runs; suppresses reporting for its descriptor. */
static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/* Boot parameter "debug_objects": force-enable object debugging. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Boot parameter "no_debug_objects": force-disable object debugging. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human readable names for the object states, indexed by the state value. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  66. static void fill_pool(void)
  67. {
  68. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  69. struct debug_obj *new;
  70. unsigned long flags;
  71. if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
  72. return;
  73. if (unlikely(!obj_cache))
  74. return;
  75. while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
  76. new = kmem_cache_zalloc(obj_cache, gfp);
  77. if (!new)
  78. return;
  79. raw_spin_lock_irqsave(&pool_lock, flags);
  80. hlist_add_head(&new->node, &obj_pool);
  81. obj_pool_free++;
  82. raw_spin_unlock_irqrestore(&pool_lock, flags);
  83. }
  84. }
  85. /*
  86. * Lookup an object in the hash bucket.
  87. */
  88. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  89. {
  90. struct debug_obj *obj;
  91. int cnt = 0;
  92. hlist_for_each_entry(obj, &b->list, node) {
  93. cnt++;
  94. if (obj->object == addr)
  95. return obj;
  96. }
  97. if (cnt > debug_objects_maxchain)
  98. debug_objects_maxchain = cnt;
  99. return NULL;
  100. }
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		/* Take the first free object and set up its tracking data. */
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		/* Move it from the free pool onto the hash bucket's list. */
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Usage statistics, updated under pool_lock. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	/* NULL signals an exhausted pool; the caller disables debugging. */
	return obj;
}
/*
 * workqueue function to free objects.
 *
 * Shrinks the free pool back down to ODEBUG_POOL_SIZE entries,
 * returning the surplus to the slab allocator.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized; keventd_up() guards against scheduling work
	 * before the workqueue infrastructure is operational.
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up();
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	/* Kick the shrinker outside of pool_lock. */
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 *
 * Drain every hash bucket and return all tracked objects to the pool;
 * the caller has already disabled the tracker.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole bucket list under its lock ... */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  196. /*
  197. * We use the pfn of the address for the hash. That way we can check
  198. * for freed objects simply by checking the affected bucket.
  199. */
  200. static struct debug_bucket *get_bucket(unsigned long addr)
  201. {
  202. unsigned long hash;
  203. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  204. return &obj_hash[hash];
  205. }
/*
 * Report a state machine violation for the given object. Rate limited
 * to 5 reports to avoid flooding the log; the selftest descriptor is
 * never reported. Always bumps the warnings counter.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Let the type specific code provide an identifying hint. */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  221. /*
  222. * Try to repair the damage, so we have a better chance to get useful
  223. * debug output.
  224. */
  225. static bool
  226. debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
  227. void * addr, enum debug_obj_state state)
  228. {
  229. if (fixup && fixup(addr, state)) {
  230. debug_objects_fixups++;
  231. return true;
  232. }
  233. return false;
  234. }
/*
 * Warn when the caller's on-stack annotation disagrees with where the
 * object actually lives. Rate limited to 5 reports.
 *
 * @addr:    address of the object
 * @onstack: non-zero when the caller annotated the object as on-stack
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	/* Annotation and reality agree: nothing to report. */
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));
	WARN_ON(1);
}
/*
 * Core of debug_object_init() / debug_object_init_on_stack(): make sure
 * the object is tracked in its hash bucket and run the "init" state
 * transition.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Opportunistically refill the free pool before taking locks. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: switch the tracker off entirely. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* Check the stack annotation on first sight of the object. */
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Init on an active object: report and try to fix up. */
		debug_print_object(obj, "init");
		state = obj->state;
		/* Drop the bucket lock before calling into the fixup. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  294. /**
  295. * debug_object_init - debug checks when an object is initialized
  296. * @addr: address of the object
  297. * @descr: pointer to an object specific debug description structure
  298. */
  299. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  300. {
  301. if (!debug_objects_enabled)
  302. return;
  303. __debug_object_init(addr, descr, 0);
  304. }
  305. EXPORT_SYMBOL_GPL(debug_object_init);
  306. /**
  307. * debug_object_init_on_stack - debug checks when an object on stack is
  308. * initialized
  309. * @addr: address of the object
  310. * @descr: pointer to an object specific debug description structure
  311. */
  312. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  313. {
  314. if (!debug_objects_enabled)
  315. return;
  316. __debug_object_init(addr, descr, 1);
  317. }
  318. EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	/* On-stack template used to report an object that is not tracked. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: report and try to fix up. */
			debug_print_object(obj, "activate");
			state = obj->state;
			/* Drop the bucket lock before calling the fixup. */
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. if true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		/* Recurse once: the object is tracked now. */
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A pending extended active state blocks deactivation. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Unknown object: report via an on-stack template. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* Destroying an untracked object is not a violation. */
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: report and try to fix up. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		/* Drop the bucket lock before calling the fixup. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		/* Double destroy: report only. */
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* Freeing an untracked object is fine. */
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: report and try to fix up. */
		debug_print_object(obj, "free");
		state = obj->state;
		/* Drop the bucket lock before calling the fixup. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Untrack the object and return it to the free pool. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* On-stack template used to report the untracked object. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance only when the extended state matches. */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Unknown object: report via an on-stack template. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
  578. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan the freed memory range [address, address + size) for tracked
 * objects. Active objects are reported and handed to the type specific
 * fixup; everything else is untracked and returned to the pool.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Walk the range one ODEBUG_CHUNK_SIZE aligned chunk at a time. */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Skip objects outside the freed range. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				/* Freeing an active object: report, fix up. */
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				/*
				 * Drop the lock across the fixup call and
				 * rescan the bucket: the list may have
				 * changed meanwhile.
				 */
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  629. void debug_check_no_obj_freed(const void *address, unsigned long size)
  630. {
  631. if (debug_objects_enabled)
  632. __debug_check_no_obj_freed(address, size);
  633. }
  634. #endif
  635. #ifdef CONFIG_DEBUG_FS
  636. static int debug_stats_show(struct seq_file *m, void *v)
  637. {
  638. seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
  639. seq_printf(m, "warnings :%d\n", debug_objects_warnings);
  640. seq_printf(m, "fixups :%d\n", debug_objects_fixups);
  641. seq_printf(m, "pool_free :%d\n", obj_pool_free);
  642. seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
  643. seq_printf(m, "pool_used :%d\n", obj_pool_used);
  644. seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
  645. return 0;
  646. }
/* debugfs open handler: wire the stats dump to the seq_file helpers. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

/* File operations for <debugfs>/debug_objects/stats */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  657. static int __init debug_objects_init_debugfs(void)
  658. {
  659. struct dentry *dbgdir, *dbgstats;
  660. if (!debug_objects_enabled)
  661. return 0;
  662. dbgdir = debugfs_create_dir("debug_objects", NULL);
  663. if (!dbgdir)
  664. return -ENOMEM;
  665. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  666. &debug_stats_fops);
  667. if (!dbgstats)
  668. goto err;
  669. return 0;
  670. err:
  671. debugfs_remove(dbgdir);
  672. return -ENOMEM;
  673. }
  674. __initcall(debug_objects_init_debugfs);
  675. #else
  676. static inline void debug_objects_init_debugfs(void) { }
  677. #endif
  678. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* non-zero: claim static initialization */
	unsigned long	dummy2[3];
};

/* Forward declaration; the descriptor is defined after the fixups. */
static __initdata struct debug_obj_descr descr_type_test;
/* Selftest hook: report whether the object claims static initialization. */
static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}
  691. /*
  692. * fixup_init is called when:
  693. * - an active object is initialized
  694. */
  695. static bool __init fixup_init(void *addr, enum debug_obj_state state)
  696. {
  697. struct self_test *obj = addr;
  698. switch (state) {
  699. case ODEBUG_STATE_ACTIVE:
  700. debug_object_deactivate(obj, &descr_type_test);
  701. debug_object_init(obj, &descr_type_test);
  702. return true;
  703. default:
  704. return false;
  705. }
  706. }
  707. /*
  708. * fixup_activate is called when:
  709. * - an active object is activated
  710. * - an unknown non-static object is activated
  711. */
  712. static bool __init fixup_activate(void *addr, enum debug_obj_state state)
  713. {
  714. struct self_test *obj = addr;
  715. switch (state) {
  716. case ODEBUG_STATE_NOTAVAILABLE:
  717. return true;
  718. case ODEBUG_STATE_ACTIVE:
  719. debug_object_deactivate(obj, &descr_type_test);
  720. debug_object_activate(obj, &descr_type_test);
  721. return true;
  722. default:
  723. return false;
  724. }
  725. }
  726. /*
  727. * fixup_destroy is called when:
  728. * - an active object is destroyed
  729. */
  730. static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
  731. {
  732. struct self_test *obj = addr;
  733. switch (state) {
  734. case ODEBUG_STATE_ACTIVE:
  735. debug_object_deactivate(obj, &descr_type_test);
  736. debug_object_destroy(obj, &descr_type_test);
  737. return true;
  738. default:
  739. return false;
  740. }
  741. }
  742. /*
  743. * fixup_free is called when:
  744. * - an active object is freed
  745. */
  746. static bool __init fixup_free(void *addr, enum debug_obj_state state)
  747. {
  748. struct self_test *obj = addr;
  749. switch (state) {
  750. case ODEBUG_STATE_ACTIVE:
  751. debug_object_deactivate(obj, &descr_type_test);
  752. debug_object_free(obj, &descr_type_test);
  753. return true;
  754. default:
  755. return false;
  756. }
  757. }
/*
 * Selftest helper: verify the tracked object's state and the fixup /
 * warning counters against the expected values. Disables object
 * debugging on any mismatch and returns -EINVAL; 0 on success.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* ODEBUG_STATE_NONE means the object must NOT be tracked anymore. */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor used by the selftest; wires up all fixup callbacks. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The object the selftest exercises the state machine with. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Exercise the object tracker state machine with a fixed sequence of
 * operations, verifying the tracked state and the fixup/warning
 * counters after each step. Runs with interrupts disabled and restores
 * the global counters on exit.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Normal init -> activate cycle. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must warn and be fixed up. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	/* Operations on a destroyed object must warn but not fix up. */
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activating a static object must track it via is_static_object. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory holding an active object must warn and fix up. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	/* Restore the counters so the selftest does not skew the stats. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  866. #else
/* Selftest compiled out: no-op stub so callers need no #ifdef guards. */
static inline void debug_objects_selftest(void) { }
  868. #endif
  869. /*
  870. * Called during early boot to initialize the hash buckets and link
  871. * the static object pool objects into the poll list. After this call
  872. * the object tracker is fully operational.
  873. */
  874. void __init debug_objects_early_init(void)
  875. {
  876. int i;
  877. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  878. raw_spin_lock_init(&obj_hash[i].lock);
  879. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  880. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  881. }
  882. /*
  883. * Convert the statically allocated objects to dynamic ones:
  884. */
  885. static int __init debug_objects_replace_static_objects(void)
  886. {
  887. struct debug_bucket *db = obj_hash;
  888. struct hlist_node *tmp;
  889. struct debug_obj *obj, *new;
  890. HLIST_HEAD(objects);
  891. int i, cnt = 0;
  892. for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
  893. obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
  894. if (!obj)
  895. goto free;
  896. hlist_add_head(&obj->node, &objects);
  897. }
  898. /*
  899. * When debug_objects_mem_init() is called we know that only
  900. * one CPU is up, so disabling interrupts is enough
  901. * protection. This avoids the lockdep hell of lock ordering.
  902. */
  903. local_irq_disable();
  904. /* Remove the statically allocated objects from the pool */
  905. hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
  906. hlist_del(&obj->node);
  907. /* Move the allocated objects to the pool */
  908. hlist_move_list(&objects, &obj_pool);
  909. /* Replace the active object references */
  910. for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
  911. hlist_move_list(&db->list, &objects);
  912. hlist_for_each_entry(obj, &objects, node) {
  913. new = hlist_entry(obj_pool.first, typeof(*obj), node);
  914. hlist_del(&new->node);
  915. /* copy object data */
  916. *new = *obj;
  917. hlist_add_head(&new->node, &db->list);
  918. cnt++;
  919. }
  920. }
  921. local_irq_enable();
  922. pr_debug("%d of %d active objects replaced\n",
  923. cnt, obj_pool_used);
  924. return 0;
  925. free:
  926. hlist_for_each_entry_safe(obj, tmp, &objects, node) {
  927. hlist_del(&obj->node);
  928. kmem_cache_free(obj_cache, obj);
  929. }
  930. return -ENOMEM;
  931. }
  932. /*
  933. * Called after the kmem_caches are functional to setup a dedicated
  934. * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  935. * prevents that the debug code is called on kmem_cache_free() for the
  936. * debug tracker objects to avoid recursive calls.
  937. */
  938. void __init debug_objects_mem_init(void)
  939. {
  940. if (!debug_objects_enabled)
  941. return;
  942. obj_cache = kmem_cache_create("debug_objects_cache",
  943. sizeof (struct debug_obj), 0,
  944. SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
  945. NULL);
  946. if (!obj_cache || debug_objects_replace_static_objects()) {
  947. debug_objects_enabled = 0;
  948. if (obj_cache)
  949. kmem_cache_destroy(obj_cache);
  950. pr_warn("out of memory.\n");
  951. } else
  952. debug_objects_selftest();
  953. }