/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
  12. #undef TRACE_SYSTEM
  13. #define TRACE_SYSTEM kmem
  14. #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
  15. #define _TRACE_KMEM_H
  16. #include <linux/types.h>
  17. #include <linux/tracepoint.h>
  18. #include <trace/events/mmflags.h>
  19. DECLARE_EVENT_CLASS(kmem_alloc,
  20. TP_PROTO(unsigned long call_site,
  21. const void *ptr,
  22. size_t bytes_req,
  23. size_t bytes_alloc,
  24. gfp_t gfp_flags),
  25. TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
  26. TP_STRUCT__entry(
  27. __field( unsigned long, call_site )
  28. __field( const void *, ptr )
  29. __field( size_t, bytes_req )
  30. __field( size_t, bytes_alloc )
  31. __field( gfp_t, gfp_flags )
  32. ),
  33. TP_fast_assign(
  34. __entry->call_site = call_site;
  35. __entry->ptr = ptr;
  36. __entry->bytes_req = bytes_req;
  37. __entry->bytes_alloc = bytes_alloc;
  38. __entry->gfp_flags = gfp_flags;
  39. ),
  40. TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
  41. __entry->call_site,
  42. __entry->ptr,
  43. __entry->bytes_req,
  44. __entry->bytes_alloc,
  45. show_gfp_flags(__entry->gfp_flags))
  46. );
  47. DEFINE_EVENT(kmem_alloc, kmalloc,
  48. TP_PROTO(unsigned long call_site, const void *ptr,
  49. size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
  50. TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
  51. );
  52. DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
  53. TP_PROTO(unsigned long call_site, const void *ptr,
  54. size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
  55. TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
  56. );
  57. DECLARE_EVENT_CLASS(kmem_alloc_node,
  58. TP_PROTO(unsigned long call_site,
  59. const void *ptr,
  60. size_t bytes_req,
  61. size_t bytes_alloc,
  62. gfp_t gfp_flags,
  63. int node),
  64. TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
  65. TP_STRUCT__entry(
  66. __field( unsigned long, call_site )
  67. __field( const void *, ptr )
  68. __field( size_t, bytes_req )
  69. __field( size_t, bytes_alloc )
  70. __field( gfp_t, gfp_flags )
  71. __field( int, node )
  72. ),
  73. TP_fast_assign(
  74. __entry->call_site = call_site;
  75. __entry->ptr = ptr;
  76. __entry->bytes_req = bytes_req;
  77. __entry->bytes_alloc = bytes_alloc;
  78. __entry->gfp_flags = gfp_flags;
  79. __entry->node = node;
  80. ),
  81. TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
  82. __entry->call_site,
  83. __entry->ptr,
  84. __entry->bytes_req,
  85. __entry->bytes_alloc,
  86. show_gfp_flags(__entry->gfp_flags),
  87. __entry->node)
  88. );
  89. DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
  90. TP_PROTO(unsigned long call_site, const void *ptr,
  91. size_t bytes_req, size_t bytes_alloc,
  92. gfp_t gfp_flags, int node),
  93. TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
  94. );
  95. DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
  96. TP_PROTO(unsigned long call_site, const void *ptr,
  97. size_t bytes_req, size_t bytes_alloc,
  98. gfp_t gfp_flags, int node),
  99. TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
  100. );
  101. DECLARE_EVENT_CLASS(kmem_free,
  102. TP_PROTO(unsigned long call_site, const void *ptr),
  103. TP_ARGS(call_site, ptr),
  104. TP_STRUCT__entry(
  105. __field( unsigned long, call_site )
  106. __field( const void *, ptr )
  107. ),
  108. TP_fast_assign(
  109. __entry->call_site = call_site;
  110. __entry->ptr = ptr;
  111. ),
  112. TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
  113. );
  114. DEFINE_EVENT(kmem_free, kfree,
  115. TP_PROTO(unsigned long call_site, const void *ptr),
  116. TP_ARGS(call_site, ptr)
  117. );
  118. DEFINE_EVENT(kmem_free, kmem_cache_free,
  119. TP_PROTO(unsigned long call_site, const void *ptr),
  120. TP_ARGS(call_site, ptr)
  121. );
  122. TRACE_EVENT(mm_page_free,
  123. TP_PROTO(struct page *page, unsigned int order),
  124. TP_ARGS(page, order),
  125. TP_STRUCT__entry(
  126. __field( unsigned long, pfn )
  127. __field( unsigned int, order )
  128. ),
  129. TP_fast_assign(
  130. __entry->pfn = page_to_pfn(page);
  131. __entry->order = order;
  132. ),
  133. TP_printk("page=%p pfn=%lu order=%d",
  134. pfn_to_page(__entry->pfn),
  135. __entry->pfn,
  136. __entry->order)
  137. );
  138. TRACE_EVENT(mm_page_free_batched,
  139. TP_PROTO(struct page *page, int cold),
  140. TP_ARGS(page, cold),
  141. TP_STRUCT__entry(
  142. __field( unsigned long, pfn )
  143. __field( int, cold )
  144. ),
  145. TP_fast_assign(
  146. __entry->pfn = page_to_pfn(page);
  147. __entry->cold = cold;
  148. ),
  149. TP_printk("page=%p pfn=%lu order=0 cold=%d",
  150. pfn_to_page(__entry->pfn),
  151. __entry->pfn,
  152. __entry->cold)
  153. );
  154. TRACE_EVENT(mm_page_alloc,
  155. TP_PROTO(struct page *page, unsigned int order,
  156. gfp_t gfp_flags, int migratetype),
  157. TP_ARGS(page, order, gfp_flags, migratetype),
  158. TP_STRUCT__entry(
  159. __field( unsigned long, pfn )
  160. __field( unsigned int, order )
  161. __field( gfp_t, gfp_flags )
  162. __field( int, migratetype )
  163. ),
  164. TP_fast_assign(
  165. __entry->pfn = page ? page_to_pfn(page) : -1UL;
  166. __entry->order = order;
  167. __entry->gfp_flags = gfp_flags;
  168. __entry->migratetype = migratetype;
  169. ),
  170. TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
  171. __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
  172. __entry->pfn != -1UL ? __entry->pfn : 0,
  173. __entry->order,
  174. __entry->migratetype,
  175. show_gfp_flags(__entry->gfp_flags))
  176. );
  177. DECLARE_EVENT_CLASS(mm_page,
  178. TP_PROTO(struct page *page, unsigned int order, int migratetype),
  179. TP_ARGS(page, order, migratetype),
  180. TP_STRUCT__entry(
  181. __field( unsigned long, pfn )
  182. __field( unsigned int, order )
  183. __field( int, migratetype )
  184. ),
  185. TP_fast_assign(
  186. __entry->pfn = page ? page_to_pfn(page) : -1UL;
  187. __entry->order = order;
  188. __entry->migratetype = migratetype;
  189. ),
  190. TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
  191. __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
  192. __entry->pfn != -1UL ? __entry->pfn : 0,
  193. __entry->order,
  194. __entry->migratetype,
  195. __entry->order == 0)
  196. );
  197. DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
  198. TP_PROTO(struct page *page, unsigned int order, int migratetype),
  199. TP_ARGS(page, order, migratetype)
  200. );
  201. TRACE_EVENT(mm_page_pcpu_drain,
  202. TP_PROTO(struct page *page, unsigned int order, int migratetype),
  203. TP_ARGS(page, order, migratetype),
  204. TP_STRUCT__entry(
  205. __field( unsigned long, pfn )
  206. __field( unsigned int, order )
  207. __field( int, migratetype )
  208. ),
  209. TP_fast_assign(
  210. __entry->pfn = page ? page_to_pfn(page) : -1UL;
  211. __entry->order = order;
  212. __entry->migratetype = migratetype;
  213. ),
  214. TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
  215. pfn_to_page(__entry->pfn), __entry->pfn,
  216. __entry->order, __entry->migratetype)
  217. );
  218. TRACE_EVENT(mm_page_alloc_extfrag,
  219. TP_PROTO(struct page *page,
  220. int alloc_order, int fallback_order,
  221. int alloc_migratetype, int fallback_migratetype),
  222. TP_ARGS(page,
  223. alloc_order, fallback_order,
  224. alloc_migratetype, fallback_migratetype),
  225. TP_STRUCT__entry(
  226. __field( unsigned long, pfn )
  227. __field( int, alloc_order )
  228. __field( int, fallback_order )
  229. __field( int, alloc_migratetype )
  230. __field( int, fallback_migratetype )
  231. __field( int, change_ownership )
  232. ),
  233. TP_fast_assign(
  234. __entry->pfn = page_to_pfn(page);
  235. __entry->alloc_order = alloc_order;
  236. __entry->fallback_order = fallback_order;
  237. __entry->alloc_migratetype = alloc_migratetype;
  238. __entry->fallback_migratetype = fallback_migratetype;
  239. __entry->change_ownership = (alloc_migratetype ==
  240. get_pageblock_migratetype(page));
  241. ),
  242. TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
  243. pfn_to_page(__entry->pfn),
  244. __entry->pfn,
  245. __entry->alloc_order,
  246. __entry->fallback_order,
  247. pageblock_order,
  248. __entry->alloc_migratetype,
  249. __entry->fallback_migratetype,
  250. __entry->fallback_order < pageblock_order,
  251. __entry->change_ownership)
  252. );
  253. TRACE_EVENT(ion_heap_shrink,
  254. TP_PROTO(const char *heap_name,
  255. size_t len,
  256. long total_allocated),
  257. TP_ARGS(heap_name, len, total_allocated),
  258. TP_STRUCT__entry(
  259. __string(heap_name, heap_name)
  260. __field(size_t, len)
  261. __field(long, total_allocated)
  262. ),
  263. TP_fast_assign(
  264. __assign_str(heap_name, heap_name);
  265. __entry->len = len;
  266. __entry->total_allocated = total_allocated;
  267. ),
  268. TP_printk("heap_name=%s, len=%zu, total_allocated=%ld",
  269. __get_str(heap_name), __entry->len, __entry->total_allocated)
  270. );
  271. TRACE_EVENT(ion_heap_grow,
  272. TP_PROTO(const char *heap_name,
  273. size_t len,
  274. long total_allocated),
  275. TP_ARGS(heap_name, len, total_allocated),
  276. TP_STRUCT__entry(
  277. __string(heap_name, heap_name)
  278. __field(size_t, len)
  279. __field(long, total_allocated)
  280. ),
  281. TP_fast_assign(
  282. __assign_str(heap_name, heap_name);
  283. __entry->len = len;
  284. __entry->total_allocated = total_allocated;
  285. ),
  286. TP_printk("heap_name=%s, len=%zu, total_allocated=%ld",
  287. __get_str(heap_name), __entry->len, __entry->total_allocated)
  288. );
  289. DECLARE_EVENT_CLASS(ion_alloc,
  290. TP_PROTO(const char *client_name,
  291. const char *heap_name,
  292. size_t len,
  293. unsigned int mask,
  294. unsigned int flags),
  295. TP_ARGS(client_name, heap_name, len, mask, flags),
  296. TP_STRUCT__entry(
  297. __array(char, client_name, 64)
  298. __string(heap_name, heap_name)
  299. __field(size_t, len)
  300. __field(unsigned int, mask)
  301. __field(unsigned int, flags)
  302. ),
  303. TP_fast_assign(
  304. strlcpy(__entry->client_name, client_name, 64);
  305. __assign_str(heap_name, heap_name);
  306. __entry->len = len;
  307. __entry->mask = mask;
  308. __entry->flags = flags;
  309. ),
  310. TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x",
  311. __entry->client_name,
  312. __get_str(heap_name),
  313. __entry->len,
  314. __entry->mask,
  315. __entry->flags)
  316. );
  317. DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,
  318. TP_PROTO(const char *client_name,
  319. const char *heap_name,
  320. size_t len,
  321. unsigned int mask,
  322. unsigned int flags),
  323. TP_ARGS(client_name, heap_name, len, mask, flags)
  324. );
  325. DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,
  326. TP_PROTO(const char *client_name,
  327. const char *heap_name,
  328. size_t len,
  329. unsigned int mask,
  330. unsigned int flags),
  331. TP_ARGS(client_name, heap_name, len, mask, flags)
  332. );
  333. DECLARE_EVENT_CLASS(ion_alloc_error,
  334. TP_PROTO(const char *client_name,
  335. const char *heap_name,
  336. size_t len,
  337. unsigned int mask,
  338. unsigned int flags,
  339. long error),
  340. TP_ARGS(client_name, heap_name, len, mask, flags, error),
  341. TP_STRUCT__entry(
  342. __field(const char *, client_name)
  343. __string(heap_name, heap_name)
  344. __field(size_t, len)
  345. __field(unsigned int, mask)
  346. __field(unsigned int, flags)
  347. __field(long, error)
  348. ),
  349. TP_fast_assign(
  350. __entry->client_name = client_name;
  351. __assign_str(heap_name, heap_name);
  352. __entry->len = len;
  353. __entry->mask = mask;
  354. __entry->flags = flags;
  355. __entry->error = error;
  356. ),
  357. TP_printk(
  358. "client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
  359. __entry->client_name,
  360. __get_str(heap_name),
  361. __entry->len,
  362. __entry->mask,
  363. __entry->flags,
  364. __entry->error)
  365. );
  366. DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,
  367. TP_PROTO(const char *client_name,
  368. const char *heap_name,
  369. size_t len,
  370. unsigned int mask,
  371. unsigned int flags,
  372. long error),
  373. TP_ARGS(client_name, heap_name, len, mask, flags, error)
  374. );
  375. DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,
  376. TP_PROTO(const char *client_name,
  377. const char *heap_name,
  378. size_t len,
  379. unsigned int mask,
  380. unsigned int flags,
  381. long error),
  382. TP_ARGS(client_name, heap_name, len, mask, flags, error)
  383. );
  384. DECLARE_EVENT_CLASS(alloc_retry,
  385. TP_PROTO(int tries),
  386. TP_ARGS(tries),
  387. TP_STRUCT__entry(
  388. __field(int, tries)
  389. ),
  390. TP_fast_assign(
  391. __entry->tries = tries;
  392. ),
  393. TP_printk("tries=%d",
  394. __entry->tries)
  395. );
  396. DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,
  397. TP_PROTO(int tries),
  398. TP_ARGS(tries)
  399. );
  400. DEFINE_EVENT(alloc_retry, migrate_retry,
  401. TP_PROTO(int tries),
  402. TP_ARGS(tries)
  403. );
  404. DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,
  405. TP_PROTO(int tries),
  406. TP_ARGS(tries)
  407. );
  408. DECLARE_EVENT_CLASS(migrate_pages,
  409. TP_PROTO(int mode),
  410. TP_ARGS(mode),
  411. TP_STRUCT__entry(
  412. __field(int, mode)
  413. ),
  414. TP_fast_assign(
  415. __entry->mode = mode;
  416. ),
  417. TP_printk("mode=%d",
  418. __entry->mode)
  419. );
  420. DEFINE_EVENT(migrate_pages, migrate_pages_start,
  421. TP_PROTO(int mode),
  422. TP_ARGS(mode)
  423. );
  424. DEFINE_EVENT(migrate_pages, migrate_pages_end,
  425. TP_PROTO(int mode),
  426. TP_ARGS(mode)
  427. );
  428. DECLARE_EVENT_CLASS(ion_alloc_pages,
  429. TP_PROTO(gfp_t gfp_flags,
  430. unsigned int order),
  431. TP_ARGS(gfp_flags, order),
  432. TP_STRUCT__entry(
  433. __field(gfp_t, gfp_flags)
  434. __field(unsigned int, order)
  435. ),
  436. TP_fast_assign(
  437. __entry->gfp_flags = gfp_flags;
  438. __entry->order = order;
  439. ),
  440. TP_printk("gfp_flags=%s order=%d",
  441. show_gfp_flags(__entry->gfp_flags),
  442. __entry->order)
  443. );
  444. DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start,
  445. TP_PROTO(gfp_t gfp_flags,
  446. unsigned int order),
  447. TP_ARGS(gfp_flags, order)
  448. );
  449. DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end,
  450. TP_PROTO(gfp_t gfp_flags,
  451. unsigned int order),
  452. TP_ARGS(gfp_flags, order)
  453. );
  454. DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail,
  455. TP_PROTO(gfp_t gfp_flags,
  456. unsigned int order),
  457. TP_ARGS(gfp_flags, order)
  458. );
  459. DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start,
  460. TP_PROTO(gfp_t gfp_flags,
  461. unsigned int order),
  462. TP_ARGS(gfp_flags, order)
  463. );
  464. DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end,
  465. TP_PROTO(gfp_t gfp_flags,
  466. unsigned int order),
  467. TP_ARGS(gfp_flags, order)
  468. );
  469. DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail,
  470. TP_PROTO(gfp_t gfp_flags,
  471. unsigned int order),
  472. TP_ARGS(gfp_flags, order)
  473. );
  474. DECLARE_EVENT_CLASS(smmu_map,
  475. TP_PROTO(unsigned long va,
  476. phys_addr_t pa,
  477. unsigned long chunk_size,
  478. size_t len),
  479. TP_ARGS(va, pa, chunk_size, len),
  480. TP_STRUCT__entry(
  481. __field(unsigned long, va)
  482. __field(phys_addr_t, pa)
  483. __field(unsigned long, chunk_size)
  484. __field(size_t, len)
  485. ),
  486. TP_fast_assign(
  487. __entry->va = va;
  488. __entry->pa = pa;
  489. __entry->chunk_size = chunk_size;
  490. __entry->len = len;
  491. ),
  492. TP_printk("v_addr=%p p_addr=%pa chunk_size=0x%lx len=%zu",
  493. (void *)__entry->va,
  494. &__entry->pa,
  495. __entry->chunk_size,
  496. __entry->len)
  497. );
  498. DEFINE_EVENT(smmu_map, iommu_map_range,
  499. TP_PROTO(unsigned long va,
  500. phys_addr_t pa,
  501. unsigned long chunk_size,
  502. size_t len),
  503. TP_ARGS(va, pa, chunk_size, len)
  504. );
  505. DECLARE_EVENT_CLASS(ion_secure_cma_add_to_pool,
  506. TP_PROTO(unsigned long len,
  507. int pool_total,
  508. bool is_prefetch),
  509. TP_ARGS(len, pool_total, is_prefetch),
  510. TP_STRUCT__entry(
  511. __field(unsigned long, len)
  512. __field(int, pool_total)
  513. __field(bool, is_prefetch)
  514. ),
  515. TP_fast_assign(
  516. __entry->len = len;
  517. __entry->pool_total = pool_total;
  518. __entry->is_prefetch = is_prefetch;
  519. ),
  520. TP_printk("len %lx, pool total %x is_prefetch %d",
  521. __entry->len,
  522. __entry->pool_total,
  523. __entry->is_prefetch)
  524. );
  525. DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_start,
  526. TP_PROTO(unsigned long len,
  527. int pool_total,
  528. bool is_prefetch),
  529. TP_ARGS(len, pool_total, is_prefetch)
  530. );
  531. DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_end,
  532. TP_PROTO(unsigned long len,
  533. int pool_total,
  534. bool is_prefetch),
  535. TP_ARGS(len, pool_total, is_prefetch)
  536. );
  537. DECLARE_EVENT_CLASS(ion_secure_cma_shrink_pool,
  538. TP_PROTO(unsigned long drained_size,
  539. unsigned long skipped_size),
  540. TP_ARGS(drained_size, skipped_size),
  541. TP_STRUCT__entry(
  542. __field(unsigned long, drained_size)
  543. __field(unsigned long, skipped_size)
  544. ),
  545. TP_fast_assign(
  546. __entry->drained_size = drained_size;
  547. __entry->skipped_size = skipped_size;
  548. ),
  549. TP_printk("drained size %lx, skipped size %lx",
  550. __entry->drained_size,
  551. __entry->skipped_size)
  552. );
  553. DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_start,
  554. TP_PROTO(unsigned long drained_size,
  555. unsigned long skipped_size),
  556. TP_ARGS(drained_size, skipped_size)
  557. );
  558. DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_end,
  559. TP_PROTO(unsigned long drained_size,
  560. unsigned long skipped_size),
  561. TP_ARGS(drained_size, skipped_size)
  562. );
  563. TRACE_EVENT(ion_prefetching,
  564. TP_PROTO(unsigned long len),
  565. TP_ARGS(len),
  566. TP_STRUCT__entry(
  567. __field(unsigned long, len)
  568. ),
  569. TP_fast_assign(
  570. __entry->len = len;
  571. ),
  572. TP_printk("prefetch size %lx",
  573. __entry->len)
  574. );
  575. DECLARE_EVENT_CLASS(ion_secure_cma_allocate,
  576. TP_PROTO(const char *heap_name,
  577. unsigned long len,
  578. unsigned long align,
  579. unsigned long flags),
  580. TP_ARGS(heap_name, len, align, flags),
  581. TP_STRUCT__entry(
  582. __string(heap_name, heap_name)
  583. __field(unsigned long, len)
  584. __field(unsigned long, align)
  585. __field(unsigned long, flags)
  586. ),
  587. TP_fast_assign(
  588. __assign_str(heap_name, heap_name);
  589. __entry->len = len;
  590. __entry->align = align;
  591. __entry->flags = flags;
  592. ),
  593. TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
  594. __get_str(heap_name),
  595. __entry->len,
  596. __entry->align,
  597. __entry->flags)
  598. );
  599. DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_start,
  600. TP_PROTO(const char *heap_name,
  601. unsigned long len,
  602. unsigned long align,
  603. unsigned long flags),
  604. TP_ARGS(heap_name, len, align, flags)
  605. );
  606. DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_end,
  607. TP_PROTO(const char *heap_name,
  608. unsigned long len,
  609. unsigned long align,
  610. unsigned long flags),
  611. TP_ARGS(heap_name, len, align, flags)
  612. );
  613. DECLARE_EVENT_CLASS(ion_cp_secure_buffer,
  614. TP_PROTO(const char *heap_name,
  615. unsigned long len,
  616. unsigned long align,
  617. unsigned long flags),
  618. TP_ARGS(heap_name, len, align, flags),
  619. TP_STRUCT__entry(
  620. __string(heap_name, heap_name)
  621. __field(unsigned long, len)
  622. __field(unsigned long, align)
  623. __field(unsigned long, flags)
  624. ),
  625. TP_fast_assign(
  626. __assign_str(heap_name, heap_name);
  627. __entry->len = len;
  628. __entry->align = align;
  629. __entry->flags = flags;
  630. ),
  631. TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
  632. __get_str(heap_name),
  633. __entry->len,
  634. __entry->align,
  635. __entry->flags)
  636. );
  637. DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_start,
  638. TP_PROTO(const char *heap_name,
  639. unsigned long len,
  640. unsigned long align,
  641. unsigned long flags),
  642. TP_ARGS(heap_name, len, align, flags)
  643. );
  644. DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_end,
  645. TP_PROTO(const char *heap_name,
  646. unsigned long len,
  647. unsigned long align,
  648. unsigned long flags),
  649. TP_ARGS(heap_name, len, align, flags)
  650. );
  651. DECLARE_EVENT_CLASS(iommu_sec_ptbl_map_range,
  652. TP_PROTO(int sec_id,
  653. int num,
  654. unsigned long va,
  655. unsigned int pa,
  656. size_t len),
  657. TP_ARGS(sec_id, num, va, pa, len),
  658. TP_STRUCT__entry(
  659. __field(int, sec_id)
  660. __field(int, num)
  661. __field(unsigned long, va)
  662. __field(unsigned int, pa)
  663. __field(size_t, len)
  664. ),
  665. TP_fast_assign(
  666. __entry->sec_id = sec_id;
  667. __entry->num = num;
  668. __entry->va = va;
  669. __entry->pa = pa;
  670. __entry->len = len;
  671. ),
  672. TP_printk("sec_id=%d num=%d va=%lx pa=%u len=%zu",
  673. __entry->sec_id,
  674. __entry->num,
  675. __entry->va,
  676. __entry->pa,
  677. __entry->len)
  678. );
  679. DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_start,
  680. TP_PROTO(int sec_id,
  681. int num,
  682. unsigned long va,
  683. unsigned int pa,
  684. size_t len),
  685. TP_ARGS(sec_id, num, va, pa, len)
  686. );
  687. DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_end,
  688. TP_PROTO(int sec_id,
  689. int num,
  690. unsigned long va,
  691. unsigned int pa,
  692. size_t len),
  693. TP_ARGS(sec_id, num, va, pa, len)
  694. );
  695. /*
  696. * Required for uniquely and securely identifying mm in rss_stat tracepoint.
  697. */
  698. #ifndef __PTR_TO_HASHVAL
  699. static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
  700. {
  701. int ret;
  702. unsigned long hashval;
  703. ret = ptr_to_hashval(ptr, &hashval);
  704. if (ret)
  705. return 0;
  706. /* The hashed value is only 32-bit */
  707. return (unsigned int)hashval;
  708. }
  709. #define __PTR_TO_HASHVAL
  710. #endif
  711. TRACE_EVENT(rss_stat,
  712. TP_PROTO(struct mm_struct *mm,
  713. int member,
  714. long count),
  715. TP_ARGS(mm, member, count),
  716. TP_STRUCT__entry(
  717. __field(unsigned int, mm_id)
  718. __field(unsigned int, curr)
  719. __field(int, member)
  720. __field(long, size)
  721. ),
  722. TP_fast_assign(
  723. __entry->mm_id = mm_ptr_to_hash(mm);
  724. __entry->curr = !!(current->mm == mm);
  725. __entry->member = member;
  726. __entry->size = (count << PAGE_SHIFT);
  727. ),
  728. TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
  729. __entry->mm_id,
  730. __entry->curr,
  731. __entry->member,
  732. __entry->size)
  733. );
  734. /* This part must be outside protection */
  735. #endif /* _TRACE_KMEM_H */
  736. /* This part must be outside protection */
  737. #include <trace/define_trace.h>