iommu-debug.c

/*
 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/dma-contiguous.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>

#if defined(CONFIG_IOMMU_TESTS)
static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
{
	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		return "DOMAIN_ATTR_GEOMETRY";
	case DOMAIN_ATTR_PAGING:
		return "DOMAIN_ATTR_PAGING";
	case DOMAIN_ATTR_WINDOWS:
		return "DOMAIN_ATTR_WINDOWS";
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		return "DOMAIN_ATTR_FSL_PAMU_STASH";
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
	case DOMAIN_ATTR_FSL_PAMUV1:
		return "DOMAIN_ATTR_FSL_PAMUV1";
	case DOMAIN_ATTR_NESTING:
		return "DOMAIN_ATTR_NESTING";
	case DOMAIN_ATTR_PT_BASE_ADDR:
		return "DOMAIN_ATTR_PT_BASE_ADDR";
	case DOMAIN_ATTR_SECURE_VMID:
		return "DOMAIN_ATTR_SECURE_VMID";
	case DOMAIN_ATTR_ATOMIC:
		return "DOMAIN_ATTR_ATOMIC";
	case DOMAIN_ATTR_CONTEXT_BANK:
		return "DOMAIN_ATTR_CONTEXT_BANK";
	case DOMAIN_ATTR_TTBR0:
		return "DOMAIN_ATTR_TTBR0";
	case DOMAIN_ATTR_CONTEXTIDR:
		return "DOMAIN_ATTR_CONTEXTIDR";
	case DOMAIN_ATTR_PROCID:
		return "DOMAIN_ATTR_PROCID";
	case DOMAIN_ATTR_DYNAMIC:
		return "DOMAIN_ATTR_DYNAMIC";
	case DOMAIN_ATTR_NON_FATAL_FAULTS:
		return "DOMAIN_ATTR_NON_FATAL_FAULTS";
	case DOMAIN_ATTR_S1_BYPASS:
		return "DOMAIN_ATTR_S1_BYPASS";
	case DOMAIN_ATTR_FAST:
		return "DOMAIN_ATTR_FAST";
	case DOMAIN_ATTR_EARLY_MAP:
		return "DOMAIN_ATTR_EARLY_MAP";
	case DOMAIN_ATTR_CB_STALL_DISABLE:
		return "DOMAIN_ATTR_CB_STALL_DISABLE";
	default:
		return "Unknown attr!";
	}
}
#endif

#ifdef CONFIG_IOMMU_DEBUG_TRACKING

static DEFINE_MUTEX(iommu_debug_attachments_lock);
static LIST_HEAD(iommu_debug_attachments);

/*
 * Each group may have more than one domain; but each domain may
 * only have one group.
 * Used by debug tools to display the name of the device(s) associated
 * with a particular domain.
 */
struct iommu_debug_attachment {
	struct iommu_domain *domain;
	struct iommu_group *group;
	struct list_head list;
};

void iommu_debug_attach_device(struct iommu_domain *domain,
			       struct device *dev)
{
	struct iommu_debug_attachment *attach;
	struct iommu_group *group;

	group = dev->iommu_group;
	if (!group)
		return;

	mutex_lock(&iommu_debug_attachments_lock);
	list_for_each_entry(attach, &iommu_debug_attachments, list)
		if ((attach->domain == domain) && (attach->group == group))
			goto out;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		goto out;

	attach->domain = domain;
	attach->group = group;
	INIT_LIST_HEAD(&attach->list);
	list_add(&attach->list, &iommu_debug_attachments);
out:
	mutex_unlock(&iommu_debug_attachments_lock);
}

void iommu_debug_domain_remove(struct iommu_domain *domain)
{
	struct iommu_debug_attachment *it, *tmp;

	mutex_lock(&iommu_debug_attachments_lock);
	list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
		if (it->domain != domain)
			continue;
		list_del(&it->list);
		kfree(it);
	}
	mutex_unlock(&iommu_debug_attachments_lock);
}
#endif

#ifdef CONFIG_IOMMU_TESTS

#ifdef CONFIG_64BIT
#define kstrtoux kstrtou64
#define kstrtox_from_user kstrtoull_from_user
#define kstrtosize_t kstrtoul
#else
#define kstrtoux kstrtou32
#define kstrtox_from_user kstrtouint_from_user
#define kstrtosize_t kstrtouint
#endif
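
/*
 * The kstrto* aliases above resolve to the 64- or 32-bit parsers so that
 * user-supplied iova/phys/size values are parsed at the native word width.
 */
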
static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;
static u32 iters_per_op = 1;
static void *test_virt_addr;

struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	struct dma_iommu_mapping *mapping;
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
	struct mutex clk_lock;
	unsigned int clk_count;
};

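/*
 * Build an sg_table in which every entry points at the same physical
 * chunk: enough to exercise iommu_map_sg() over @total_size without
 * actually allocating @total_size bytes of memory.
 */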
static int iommu_debug_build_phoney_sg_table(struct device *dev,
					     struct sg_table *table,
					     unsigned long total_size,
					     unsigned long chunk_size)
{
	unsigned long nents = total_size / chunk_size;
	struct scatterlist *sg;
	int i;
	struct page *page;

	if (!IS_ALIGNED(total_size, PAGE_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(total_size, chunk_size))
		return -EINVAL;
	if (sg_alloc_table(table, nents, GFP_KERNEL))
		return -EINVAL;

	page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
	if (!page)
		goto free_table;

	/* all the same page... why not. */
	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_page(sg, page, chunk_size, 0);

	return 0;

free_table:
	sg_free_table(table);
	return -ENOMEM;
}

static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
						struct sg_table *table,
						unsigned long chunk_size)
{
	__free_pages(sg_page(table->sgl), get_order(chunk_size));
	sg_free_table(table);
}

static const char *_size_to_string(unsigned long size)
{
	switch (size) {
	case SZ_4K:
		return "4K";
	case SZ_8K:
		return "8K";
	case SZ_16K:
		return "16K";
	case SZ_64K:
		return "64K";
	case SZ_2M:
		return "2M";
	case SZ_1M * 12:
		return "12M";
	case SZ_1M * 20:
		return "20M";
	}
	return "unknown size, please add to _size_to_string";
}

static int nr_iters_set(void *data, u64 val)
{
	if (!val)
		val = 1;
	if (val > 10000)
		val = 10000;
	*(u32 *)data = val;
	return 0;
}

static int nr_iters_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
			nr_iters_get, nr_iters_set, "%llu\n");

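/*
 * Time iommu_map()/iommu_unmap() (and iommu_map_sg()) for each entry in
 * @sizes on a freshly allocated domain configured with @attrs, reporting
 * the average latency over iters_per_op iterations.
 */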
static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
					 enum iommu_attr attrs[],
					 void *attr_values[], int nattrs,
					 const size_t sizes[])
{
	int i;
	const size_t *sz;
	struct iommu_domain *domain;
	unsigned long iova = 0x10000;
	phys_addr_t paddr = 0xa000;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain) {
		seq_puts(s, "Couldn't allocate domain\n");
		return;
	}

	seq_puts(s, "Domain attributes: [ ");
	for (i = 0; i < nattrs; ++i) {
		/* not all attrs are ints, but this will get us by for now */
		seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
			   *((int *)attr_values[i]),
			   i < nattrs ? " " : "");
	}
	seq_puts(s, "]\n");

	for (i = 0; i < nattrs; ++i) {
		if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
			seq_printf(s, "Couldn't set %d to the value at %p\n",
				   attrs[i], attr_values[i]);
			goto out_domain_free;
		}
	}

	if (iommu_attach_group(domain, dev->iommu_group)) {
		seq_puts(s,
			 "Couldn't attach new domain to device. Is it already attached?\n");
		goto out_domain_free;
	}

	seq_printf(s, "(average over %d iterations)\n", iters_per_op);
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		int i;

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map(domain, iova, paddr, size,
				      IOMMU_READ | IOMMU_WRITE)) {
				seq_puts(s, "Failed to map\n");
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				continue;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);
		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);
		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);
	}

	seq_putc(s, '\n');
	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
	for (sz = sizes; *sz; ++sz) {
		size_t size = *sz;
		size_t unmapped;
		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
		struct timespec tbefore, tafter, diff;
		struct sg_table table;
		unsigned long chunk_size = SZ_4K;
		int i;

		if (iommu_debug_build_phoney_sg_table(dev, &table, size,
						      chunk_size)) {
			seq_puts(s,
				 "couldn't build phoney sg table! bailing...\n");
			goto out_detach;
		}

		for (i = 0; i < iters_per_op; ++i) {
			getnstimeofday(&tbefore);
			if (iommu_map_sg(domain, iova, table.sgl, table.nents,
					 IOMMU_READ | IOMMU_WRITE) != size) {
				seq_puts(s, "Failed to map_sg\n");
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			map_elapsed_ns += timespec_to_ns(&diff);

			getnstimeofday(&tbefore);
			unmapped = iommu_unmap(domain, iova, size);
			if (unmapped != size) {
				seq_printf(s,
					   "Only unmapped %zx instead of %zx\n",
					   unmapped, size);
				goto next;
			}
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			unmap_elapsed_ns += timespec_to_ns(&diff);
		}

		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
					     &map_elapsed_rem);
		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
					       &unmap_elapsed_rem);
		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
					     &map_elapsed_rem);
		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
					       &unmap_elapsed_rem);
		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
			   _size_to_string(size),
			   map_elapsed_us, map_elapsed_rem,
			   unmap_elapsed_us, unmap_elapsed_rem);
next:
		iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
	}

out_detach:
	iommu_detach_group(domain, dev->iommu_group);
out_domain_free:
	iommu_domain_free(domain);
}

static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
	};
	int atomic = 1;
	void *attr_values[] = { &atomic };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_debug_profiling_show, inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fops = {
	.open	 = iommu_debug_profiling_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
				 SZ_1M * 20, 0 };
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_ATOMIC,
		DOMAIN_ATTR_SECURE_VMID,
	};
	int one = 1, secure_vmid = VMID_CP_PIXEL;
	void *attr_values[] = { &one, &secure_vmid };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_secure_profiling_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, iommu_debug_secure_profiling_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_secure_profiling_fops = {
	.open	 = iommu_debug_secure_profiling_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	enum iommu_attr attrs[] = {
		DOMAIN_ATTR_FAST,
		DOMAIN_ATTR_ATOMIC,
	};
	int one = 1;
	void *attr_values[] = { &one, &one };

	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
				     ARRAY_SIZE(attrs), sizes);

	return 0;
}

static int iommu_debug_profiling_fast_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_fops = {
	.open	 = iommu_debug_profiling_fast_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

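/*
 * Times dma_map_single_attrs()/dma_unmap_single_attrs() through a fast
 * mapping, once with normal cache maintenance and once with
 * DMA_ATTR_SKIP_CPU_SYNC, to separate the iova-management cost from the
 * CPU cache-maintenance cost.
 */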
static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	struct dma_iommu_mapping *mapping;
	dma_addr_t dma_addr;
	void *virt;
	int fast = 1;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	virt = kmalloc(1518, GFP_KERNEL);
	if (!virt)
		goto out;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
	if (!mapping) {
		seq_puts(s, "fast_smmu_create_mapping failed\n");
		goto out_kfree;
	}

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		seq_puts(s, "fast_smmu_attach_device failed\n");
		goto out_release_mapping;
	}

	if (iommu_enable_config_clocks(mapping->domain)) {
		seq_puts(s, "Couldn't enable clocks\n");
		goto out_detach;
	}

	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec tbefore, tafter, diff;
			u64 ns;

			getnstimeofday(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_disable_config_clocks;
			}
			map_elapsed_ns[i] = ns;

			getnstimeofday(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			getnstimeofday(&tafter);
			diff = timespec_sub(tafter, tbefore);
			ns = timespec_to_ns(&diff);
			unmap_elapsed_ns[i] = ns;
		}

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_disable_config_clocks:
	iommu_disable_config_clocks(mapping->domain);
out_detach:
	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out_kfree:
	kfree(virt);
out:
	return 0;
}

static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open	 = iommu_debug_profiling_fast_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

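/*
 * Fills the entire 4GB iova space with 8K mappings, punches 4K holes in
 * it, then remaps so that freed iovas are reused immediately. A missed
 * TLB invalidation on a reused iova should show up as a failure here.
 */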
static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
{
	int i, ret = 0;
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	void *virt;
	phys_addr_t phys;
	dma_addr_t dma_addr;

	/*
	 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
	 * chunk that we can work with.
	 */
	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
	if (!virt)
		return -ENOMEM;
	phys = virt_to_phys(virt);

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * free up 4K at the very beginning, then leave one 4K mapping,
	 * then free up 8K. This will result in the next 8K map skipping
	 * over the 4K hole and taking the 8K one.
	 */
	dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);

	/* remap 8K */
	dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
	if (dma_addr != SZ_8K) {
		dma_addr_t expected = SZ_8K;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * now remap 4K. We should get the first 4K chunk that was skipped
	 * over during the previous 8K map. If we missed a TLB invalidate
	 * at that point this should explode.
	 */
	dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
	if (dma_addr != 0) {
		dma_addr_t expected = 0;

		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
			&dma_addr, &expected);
		ret = -EINVAL;
		goto out;
	}

	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
		dev_err(dev,
			"dma_map_single unexpectedly succeeded after remaps (VA should have been exhausted)\n");
		ret = -EINVAL;
		goto out;
	}

	/* we're all full again. unmap everything. */
	for (iova = 0; iova < max; iova += SZ_8K)
		dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(SZ_8K));
	return ret;
}

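/*
 * A tiny Fibonacci generator: a cheap, deterministic source of scattered
 * "random-ish" iovas for the sweep below.
 */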
struct fib_state {
	unsigned long cur;
	unsigned long prev;
};

static void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

static unsigned long get_next_fib(struct fib_state *f)
{
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}

/*
 * Not actually random. Just testing the fibs (and max - the fibs).
 */
static int __rand_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size)
{
	u64 iova;
	const u64 max = SZ_1G * 4ULL - 1;
	int i, remapped, unmapped, ret = 0;
	void *virt;
	dma_addr_t dma_addr, dma_addr2;
	struct fib_state fib;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	/* now unmap "random" iovas */
	unmapped = 0;
	fib_init(&fib);
	for (iova = get_next_fib(&fib) * size;
	     iova < max - size;
	     iova = (u64)get_next_fib(&fib) * size) {
		dma_addr = (dma_addr_t)(iova);
		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
		if (dma_addr == dma_addr2) {
			WARN(1,
			     "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
			     __func__);
			ret = -EINVAL;
			goto out;
		}
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
		unmapped += 2;
	}

	/* and map until everything fills back up */
	for (remapped = 0; ; ++remapped) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == DMA_ERROR_CODE)
			break;
	}

	if (unmapped != remapped) {
		dev_err(dev,
			"Unexpected random remap count! Unmapped %d but remapped %d\n",
			unmapped, remapped);
		ret = -EINVAL;
	}

	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

static int __check_mapping(struct device *dev, struct iommu_domain *domain,
			   dma_addr_t iova, phys_addr_t expected)
{
	phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
	phys_addr_t res2 = iommu_iova_to_phys(domain, iova);

	WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");

	if (res != expected) {
		dev_err_ratelimited(dev,
				    "Bad translation for %pa! Expected: %pa Got: %pa\n",
				    &iova, &expected, &res);
		return -EINVAL;
	}

	return 0;
}

static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	u64 iova;
	dma_addr_t dma_addr;
	void *virt;
	phys_addr_t phys;
	const u64 max = SZ_1G * 4ULL - 1;
	int ret = 0, i;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		unsigned long expected = iova;

		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr != expected) {
			dev_err_ratelimited(dev,
					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
					    i, expected,
					    (unsigned long)dma_addr);
			ret = -EINVAL;
			goto out;
		}
	}

	if (domain) {
		/* check every mapping from 0..6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;

			if (__check_mapping(dev, domain, iova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
		/* and from 4G..4G-6M */
		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
			phys_addr_t expected = phys;
			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;

			if (__check_mapping(dev, domain, theiova, expected)) {
				dev_err(dev, "iter: %d\n", i);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* at this point, our VA space should be full */
	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
	if (dma_addr != DMA_ERROR_CODE) {
		dev_err_ratelimited(dev,
				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
				    (unsigned long)dma_addr);
		ret = -EINVAL;
	}

out:
	for (iova = 0; iova < max; iova += size)
		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);

	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

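/* Logs to both dmesg and the seq_file so failures are visible in both. */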
#define ds_printf(d, s, fmt, ...) ({				\
		dev_err(d, fmt, ##__VA_ARGS__);			\
		seq_printf(s, fmt, ##__VA_ARGS__);		\
	})

static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
					struct iommu_domain *domain, void *priv)
{
	int i, j, ret = 0;
	size_t *sz, *sizes = priv;

	for (j = 0; j < 1; ++j) {
		for (sz = sizes; *sz; ++sz) {
			for (i = 0; i < 2; ++i) {
				ds_printf(dev, s, "Full VA sweep @%s %d",
					  _size_to_string(*sz), i);
				if (__full_va_sweep(dev, s, *sz, domain)) {
					ds_printf(dev, s, " -> FAILED\n");
					ret = -EINVAL;
				} else {
					ds_printf(dev, s, " -> SUCCEEDED\n");
				}
			}
		}
	}

	ds_printf(dev, s, "bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	for (sz = sizes; *sz; ++sz) {
		for (i = 0; i < 2; ++i) {
			ds_printf(dev, s, "Rand VA sweep @%s %d",
				  _size_to_string(*sz), i);
			if (__rand_va_sweep(dev, s, *sz)) {
				ds_printf(dev, s, " -> FAILED\n");
				ret = -EINVAL;
			} else {
				ds_printf(dev, s, " -> SUCCEEDED\n");
			}
		}
	}

	ds_printf(dev, s, "TLB stress sweep");
	if (__tlb_stress_sweep(dev, s)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	ds_printf(dev, s, "second bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	return ret;
}

static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}

static int __functional_dma_api_basic_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = 1518;
	int i, j, ret = 0;
	u8 *data;
	dma_addr_t iova;
	phys_addr_t pa, pa2;

	ds_printf(dev, s, "Basic DMA API test");
	/* Make sure we can allocate and use a buffer */
	for (i = 0; i < 1000; ++i) {
		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ds_printf(dev, s, " -> FAILED\n");
			ret = -EINVAL;
			goto out;
		}
		memset(data, 0xa5, size);
		iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		pa = iommu_iova_to_phys(domain, iova);
		pa2 = iommu_iova_to_phys_hard(domain, iova);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		pa2 = virt_to_phys(data);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			goto out;
		}
		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
		for (j = 0; j < size; ++j) {
			if (data[j] != 0xa5) {
				dev_err(dev, "data[%d] != 0xa5\n", j);
				ret = -EINVAL;
				goto out;
			}
		}
		kfree(data);
	}

out:
	if (ret)
		ds_printf(dev, s, " -> FAILED\n");
	else
		ds_printf(dev, s, " -> SUCCEEDED\n");
	return ret;
}

/* Creates a fresh fast mapping and applies @fn to it */
static int __apply_to_new_mapping(struct seq_file *s,
				  int (*fn)(struct device *dev,
					    struct seq_file *s,
					    struct iommu_domain *domain,
					    void *priv),
				  void *priv)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	int ret = -EINVAL, fast = 1;
	phys_addr_t pt_phys;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL));
	if (!mapping)
		goto out;

	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
		seq_puts(s, "iommu_domain_set_attr failed\n");
		goto out_release_mapping;
	}

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
				  &pt_phys)) {
		ds_printf(dev, s, "Couldn't get page table base address\n");
		goto out_release_mapping;
	}

	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
	if (iommu_enable_config_clocks(mapping->domain)) {
		ds_printf(dev, s, "Couldn't enable clocks\n");
		goto out_release_mapping;
	}
	ret = fn(dev, s, mapping->domain, priv);
	iommu_disable_config_clocks(mapping->domain);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
						    void *ignored)
{
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
	int ret = 0;

	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
	return ret;
}

static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
						    struct file *file)
{
	return single_open(file, iommu_debug_functional_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
	.open	 = iommu_debug_functional_fast_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
	int ret = -EINVAL;

	/* Make the size equal to MAX_ULONG */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
					   (SZ_1G * 4ULL - 1));
	if (!mapping)
		goto out;

	if (arm_iommu_attach_device(dev, mapping))
		goto out_release_mapping;

	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);

	arm_iommu_detach_device(dev);
out_release_mapping:
	arm_iommu_release_mapping(mapping);
out:
	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
	return 0;
}

static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_functional_arm_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
	.open	 = iommu_debug_functional_arm_dma_api_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
					int val, bool is_secure)
{
	struct iommu_group *group = ddev->dev->iommu_group;

	ddev->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ddev->domain) {
		pr_err("Couldn't allocate domain\n");
		return -ENOMEM;
	}

	val = VMID_CP_CAMERA;
	if (is_secure && iommu_domain_set_attr(ddev->domain,
					       DOMAIN_ATTR_SECURE_VMID,
					       &val)) {
		pr_err("Couldn't set secure vmid to %d\n", val);
		goto out_domain_free;
	}

	if (iommu_attach_group(ddev->domain, group)) {
		dev_err(ddev->dev, "Couldn't attach new domain to device\n");
		goto out_domain_free;
	}

	return 0;

out_domain_free:
	iommu_domain_free(ddev->domain);
	ddev->domain = NULL;
	return -EIO;
}

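/*
 * Attach-file protocol: writing a non-zero value allocates a domain and
 * attaches it to the device's group; writing 0 detaches and frees it.
 * The dma_attach variant below goes through the arm_iommu DMA mapping
 * path instead of the raw IOMMU API.
 */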
static ssize_t __iommu_debug_dma_attach_write(struct file *file,
					      const char __user *ubuf,
					      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct dma_iommu_mapping *dma_mapping;
	ssize_t retval = -EINVAL;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer\n");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (dev->archdata.mapping && dev->archdata.mapping->domain) {
			pr_err("Already attached.\n");
			retval = -EINVAL;
			goto out;
		}
		if (WARN(dev->archdata.iommu,
			 "Attachment tracking out of sync with device\n")) {
			retval = -EINVAL;
			goto out;
		}

		dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						       (SZ_1G * 4ULL));
		if (!dma_mapping)
			goto out;

		if (arm_iommu_attach_device(dev, dma_mapping))
			goto out_release_mapping;

		ddev->mapping = dma_mapping;
		pr_err("Attached\n");
	} else {
		if (!dev->archdata.mapping) {
			pr_err("No mapping. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		if (!dev->archdata.mapping->domain) {
			pr_err("No domain. Did you already attach?\n");
			retval = -EINVAL;
			goto out;
		}
		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(ddev->mapping);
		pr_err("Detached\n");
	}
	retval = count;
	return retval;

out_release_mapping:
	arm_iommu_release_mapping(dma_mapping);
out:
	return retval;
}

static ssize_t __iommu_debug_attach_write(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *offset,
					  bool is_secure)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	struct iommu_domain *domain;
	ssize_t retval;
	int val;

	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
		pr_err("Invalid format. Expected a hex or decimal integer\n");
		retval = -EFAULT;
		goto out;
	}

	if (val) {
		if (ddev->domain) {
			pr_err("Iommu-Debug is already attached?\n");
			retval = -EINVAL;
			goto out;
		}

		domain = iommu_get_domain_for_dev(dev);
		if (domain) {
			pr_err("Another driver is using this device's iommu\n"
			       "Iommu-Debug cannot be used concurrently\n");
			retval = -EINVAL;
			goto out;
		}
		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
			retval = -EIO;
			goto out;
		}
		pr_err("Attached\n");
	} else {
		if (!ddev->domain) {
			pr_err("Iommu-Debug is not attached?\n");
			retval = -EINVAL;
			goto out;
		}
		iommu_detach_group(ddev->domain, dev->iommu_group);
		iommu_domain_free(ddev->domain);
		ddev->domain = NULL;
		pr_err("Detached\n");
	}
	retval = count;
out:
	return retval;
}

static ssize_t iommu_debug_dma_attach_write(struct file *file,
					    const char __user *ubuf,
					    size_t count, loff_t *offset)
{
	return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
}

static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
					   size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	char c[2];
	size_t buflen = sizeof(c);

	if (*offset)
		return 0;

	if (!dev->archdata.mapping)
		c[0] = '0';
	else
		c[0] = dev->archdata.mapping->domain ? '1' : '0';
	c[1] = '\n';

	buflen = min(count, buflen);
	if (copy_to_user(ubuf, &c, buflen)) {
		pr_err("copy_to_user failed\n");
		return -EFAULT;
	}
	*offset = 1;		/* non-zero means we're done */

	return buflen;
}

static const struct file_operations iommu_debug_dma_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_attach_write,
	.read	= iommu_debug_dma_attach_read,
};

static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
					       char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf[100];
	ssize_t retval;
	size_t buflen;
	int buf_len = sizeof(buf);

	if (*offset)
		return 0;

	memset(buf, 0, buf_len);

	if (!test_virt_addr)
		strlcpy(buf, "FAIL\n", buf_len);
	else
		snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_test_virt_addr_fops = {
	.open	= simple_open,
	.read	= iommu_debug_test_virt_addr_read,
};

static ssize_t iommu_debug_attach_write(struct file *file,
					const char __user *ubuf,
					size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  false);
}

static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
				       size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	char c[2];
	size_t buflen = sizeof(c);

	if (*offset)
		return 0;

	c[0] = ddev->domain ? '1' : '0';
	c[1] = '\n';
	buflen = min(count, buflen);
	if (copy_to_user(ubuf, &c, buflen)) {
		pr_err("copy_to_user failed\n");
		return -EFAULT;
	}
	*offset = 1;		/* non-zero means we're done */

	return buflen;
}

static const struct file_operations iommu_debug_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attach_write,
	.read	= iommu_debug_attach_read,
};

static ssize_t iommu_debug_attach_write_secure(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	return __iommu_debug_attach_write(file, ubuf, count, offset,
					  true);
}

static const struct file_operations iommu_debug_secure_attach_fops = {
	.open	= simple_open,
	.write	= iommu_debug_attach_write_secure,
	.read	= iommu_debug_attach_read,
};

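/*
 * PTE interface: write an iova to select it, then read back the raw page
 * table entry (via iommu_iova_to_pte) for that iova.
 */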
static ssize_t iommu_debug_pte_write(struct file *file,
				     const char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
	return count;
}

static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
				    size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	uint64_t pte;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (kptr_restrict != 0) {
		pr_err("kptr_restrict needs to be disabled.\n");
		return -EPERM;
	}
	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}
	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
				ddev->iova);
	if (!pte)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_pte_fops = {
	.open	= simple_open,
	.write	= iommu_debug_pte_write,
	.read	= iommu_debug_pte_read,
};

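/*
 * ATOS interface: write an iova to select it, then read back the physical
 * address the SMMU's hardware translation (iommu_iova_to_phys_hard)
 * returns for it; on failure the software table walk is logged for
 * comparison.
 */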
static ssize_t iommu_debug_atos_write(struct file *file,
				      const char __user *ubuf,
				      size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	dma_addr_t iova;

	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
		pr_err("Invalid format for iova\n");
		ddev->iova = 0;
		return -EINVAL;
	}

	ddev->iova = iova;
	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
	return count;
}

static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
				     size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (kptr_restrict != 0) {
		pr_err("kptr_restrict needs to be disabled.\n");
		return -EPERM;
	}
	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
	if (!phys) {
		strlcpy(buf, "FAIL\n", sizeof(buf));
		phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
		dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
			&ddev->iova, &phys);
	} else {
		snprintf(buf, sizeof(buf), "%pa\n", &phys);
	}

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_atos_fops = {
	.open	= simple_open,
	.write	= iommu_debug_atos_write,
	.read	= iommu_debug_atos_read,
};

static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
					 size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	phys_addr_t phys;
	char buf[100];
	ssize_t retval;
	size_t buflen;

	if (kptr_restrict != 0) {
		pr_err("kptr_restrict needs to be disabled.\n");
		return -EPERM;
	}

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}

	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
				       ddev->iova);
	if (!phys)
		strlcpy(buf, "FAIL\n", sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "%pa\n", &phys);

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_dma_atos_fops = {
	.open	= simple_open,
	.write	= iommu_debug_atos_write,
	.read	= iommu_debug_dma_atos_read,
};
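
/*
 * The "map" file below calls iommu_map() on the attached domain.
 * Usage sketch (path as above is an assumption; the addresses are
 * illustrative; prot is the bitwise OR of IOMMU_READ, IOMMU_WRITE,
 * etc., so 0x3 maps read/write):
 *
 *   echo "0x10000000,0x80000000,0x1000,0x3" > \
 *	/sys/kernel/debug/iommu/tests/<dev>/map
 */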
static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
				     size_t count, loff_t *offset)
{
	ssize_t retval = -EINVAL;
	int ret;
	char *comma1, *comma2, *comma3;
	char buf[100];
	dma_addr_t iova;
	phys_addr_t phys;
	size_t size;
	int prot;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= 100) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, 100);

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	comma3 = strnchr(comma2 + 1, count, ',');
	if (!comma3)
		goto invalid_format;

	/* split up the words */
	*comma1 = *comma2 = *comma3 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtoux(comma1 + 1, 0, &phys))
		goto invalid_format;

	if (kstrtosize_t(comma2 + 1, 0, &size))
		goto invalid_format;

	if (kstrtoint(comma3 + 1, 0, &prot))
		goto invalid_format;

	ret = iommu_map(ddev->domain, iova, phys, size, prot);
	if (ret) {
		pr_err("iommu_map failed with %d\n", ret);
		retval = -EIO;
		goto out;
	}

	retval = count;
	pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
	       &iova, &phys, size, prot);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_map_fops = {
	.open	= simple_open,
	.write	= iommu_debug_map_write,
};

/*
 * Performs DMA mapping of a given virtual address and size to an iova address.
 * User input format: (addr,len,dma attr) where dma attr is:
 *	0: normal mapping
 *	1: force coherent mapping
 *	2: force non-coherent mapping
 *	3: use system cache
 */
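/*
 * Example (sketch; the path is an assumption and the length is
 * illustrative; the address must lie within the 1MB test buffer
 * reported by reading test_virt_addr):
 *
 *   cat /sys/kernel/debug/iommu/tests/<dev>/test_virt_addr
 *   echo "<addr>,0x1000,0" > /sys/kernel/debug/iommu/tests/<dev>/dma_map
 *   cat /sys/kernel/debug/iommu/tests/<dev>/dma_map
 *
 * Reading the file back returns the iova chosen by the DMA layer.
 */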
static ssize_t iommu_debug_dma_map_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *offset)
{
	ssize_t retval = -EINVAL;
	char *comma1, *comma2;
	char buf[100];
	unsigned long addr;
	void *v_addr;
	dma_addr_t iova;
	size_t size;
	unsigned int attr;
	unsigned long dma_attrs;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	if (count >= sizeof(buf)) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}

	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	*comma1 = *comma2 = '\0';

	if (kstrtoul(buf, 0, &addr))
		goto invalid_format;
	v_addr = (void *)addr;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	if (kstrtouint(comma2 + 1, 0, &attr))
		goto invalid_format;

	if (v_addr < test_virt_addr || v_addr > (test_virt_addr + SZ_1M - 1))
		goto invalid_addr;

	if (attr == 0)
		dma_attrs = 0;
	else if (attr == 1)
		dma_attrs = DMA_ATTR_FORCE_COHERENT;
	else if (attr == 2)
		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
	else if (attr == 3)
		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
	else
		goto invalid_format;

	iova = dma_map_single_attrs(dev, v_addr, size,
				    DMA_TO_DEVICE, dma_attrs);
	if (dma_mapping_error(dev, iova)) {
		pr_err("Failed to perform dma_map_single\n");
		retval = -EINVAL;
		goto out;
	}

	retval = count;
	pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
	       v_addr, &iova, size);
	ddev->iova = iova;
	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-coherent\n3: use system cache\n");
	return retval;

invalid_addr:
	pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
	return retval;
}

static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;
	char buf[100];
	ssize_t retval;
	size_t buflen;
	dma_addr_t iova;

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		return -EINVAL;
	}

	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (*offset)
		return 0;

	memset(buf, 0, sizeof(buf));

	iova = ddev->iova;
	snprintf(buf, sizeof(buf), "%pa\n", &iova);

	buflen = min(count, strlen(buf));
	if (copy_to_user(ubuf, buf, buflen)) {
		pr_err("Couldn't copy_to_user\n");
		retval = -EFAULT;
	} else {
		*offset = 1;	/* non-zero means we're done */
		retval = buflen;
	}

	return retval;
}

static const struct file_operations iommu_debug_dma_map_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_map_write,
	.read	= iommu_debug_dma_map_read,
};
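
/*
 * The "unmap" file below calls iommu_unmap() on the attached domain.
 * Example (sketch; undoes the illustrative map example above):
 *
 *   echo "0x10000000,0x1000" > /sys/kernel/debug/iommu/tests/<dev>/unmap
 */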
static ssize_t iommu_debug_unmap_write(struct file *file,
				       const char __user *ubuf,
				       size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1;
	char buf[100];
	dma_addr_t iova;
	size_t size;
	size_t unmapped;
	struct iommu_debug_device *ddev = file->private_data;

	if (count >= 100) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	memset(buf, 0, 100);

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	/* split up the words */
	*comma1 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	unmapped = iommu_unmap(ddev->domain, iova, size);
	if (unmapped != size) {
		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx\n",
		       size, unmapped);
		return -EIO;
	}

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_unmap_fops = {
	.open	= simple_open,
	.write	= iommu_debug_unmap_write,
};
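
/*
 * The "dma_unmap" file below undoes a dma_map: it takes the iova, the
 * length, and the same "dma attr" code that was used when mapping.
 * Example (sketch; path and length as assumed above):
 *
 *   echo "<iova>,0x1000,0" > /sys/kernel/debug/iommu/tests/<dev>/dma_unmap
 */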
static ssize_t iommu_debug_dma_unmap_write(struct file *file,
					   const char __user *ubuf,
					   size_t count, loff_t *offset)
{
	ssize_t retval = 0;
	char *comma1, *comma2;
	char buf[100];
	size_t size;
	unsigned int attr;
	dma_addr_t iova;
	unsigned long dma_attrs;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	if (count >= sizeof(buf)) {
		pr_err("Value too large\n");
		return -EINVAL;
	}

	if (!dev->archdata.mapping) {
		pr_err("No mapping. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}

	if (!dev->archdata.mapping->domain) {
		pr_err("No domain. Did you already attach?\n");
		retval = -EINVAL;
		goto out;
	}

	memset(buf, 0, sizeof(buf));

	if (copy_from_user(buf, ubuf, count)) {
		pr_err("Couldn't copy from user\n");
		retval = -EFAULT;
		goto out;
	}

	comma1 = strnchr(buf, count, ',');
	if (!comma1)
		goto invalid_format;

	comma2 = strnchr(comma1 + 1, count, ',');
	if (!comma2)
		goto invalid_format;

	*comma1 = *comma2 = '\0';

	if (kstrtoux(buf, 0, &iova))
		goto invalid_format;

	if (kstrtosize_t(comma1 + 1, 0, &size))
		goto invalid_format;

	if (kstrtouint(comma2 + 1, 0, &attr))
		goto invalid_format;

	if (attr == 0)
		dma_attrs = 0;
	else if (attr == 1)
		dma_attrs = DMA_ATTR_FORCE_COHERENT;
	else if (attr == 2)
		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
	else if (attr == 3)
		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
	else
		goto invalid_format;

	dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);

	retval = count;
	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
	return retval;

invalid_format:
	pr_err("Invalid format. Expected: iova,len,dma attr\n");
	return -EINVAL;
}

static const struct file_operations iommu_debug_dma_unmap_fops = {
	.open	= simple_open,
	.write	= iommu_debug_dma_unmap_write,
};
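
/*
 * The "config_clocks" file below keeps a reference count: writing '1'
 * enables the domain's config clocks on the 0->1 transition, and
 * writing '0' disables them again on the 1->0 transition.
 * Example (sketch; path as assumed above):
 *
 *   echo 1 > /sys/kernel/debug/iommu/tests/<dev>/config_clocks
 *   ... inspect IOMMU registers ...
 *   echo 0 > /sys/kernel/debug/iommu/tests/<dev>/config_clocks
 */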
static ssize_t iommu_debug_config_clocks_write(struct file *file,
					       const char __user *ubuf,
					       size_t count, loff_t *offset)
{
	char buf;
	struct iommu_debug_device *ddev = file->private_data;
	struct device *dev = ddev->dev;

	/* we're expecting a single character plus (optionally) a newline */
	if (count > 2) {
		dev_err(dev, "Invalid value\n");
		return -EINVAL;
	}

	if (!ddev->domain) {
		dev_err(dev, "No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (copy_from_user(&buf, ubuf, 1)) {
		dev_err(dev, "Couldn't copy from user\n");
		return -EFAULT;
	}

	mutex_lock(&ddev->clk_lock);
	switch (buf) {
	case '0':
		if (ddev->clk_count == 0) {
			dev_err(dev, "Config clocks already disabled\n");
			break;
		}

		if (--ddev->clk_count > 0)
			break;

		dev_err(dev, "Disabling config clocks\n");
		iommu_disable_config_clocks(ddev->domain);
		break;
	case '1':
		if (ddev->clk_count++ > 0)
			break;

		dev_err(dev, "Enabling config clocks\n");
		if (iommu_enable_config_clocks(ddev->domain))
			dev_err(dev, "Failed!\n");
		break;
	default:
		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
		mutex_unlock(&ddev->clk_lock);
		return -EINVAL;
	}
	mutex_unlock(&ddev->clk_lock);

	return count;
}

static const struct file_operations iommu_debug_config_clocks_fops = {
	.open	= simple_open,
	.write	= iommu_debug_config_clocks_write,
};
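
/*
 * Writing a flags value to the "trigger-fault" file below asks the
 * IOMMU driver to provoke a fault on the attached domain, which is
 * useful for exercising fault handlers. The meaning of the flags is
 * driver-specific. Example (sketch; path and value as assumed above):
 *
 *   echo 1 > /sys/kernel/debug/iommu/tests/<dev>/trigger-fault
 */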
static ssize_t iommu_debug_trigger_fault_write(
		struct file *file, const char __user *ubuf, size_t count,
		loff_t *offset)
{
	struct iommu_debug_device *ddev = file->private_data;
	unsigned long flags;

	if (!ddev->domain) {
		pr_err("No domain. Did you already attach?\n");
		return -EINVAL;
	}

	if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
		pr_err("Invalid flags format\n");
		return -EFAULT;
	}

	iommu_trigger_fault(ddev->domain, flags);

	return count;
}

static const struct file_operations iommu_debug_trigger_fault_fops = {
	.open	= simple_open,
	.write	= iommu_debug_trigger_fault_write,
};

/*
 * The following will only work for drivers that implement the generic
 * device tree bindings described in
 * Documentation/devicetree/bindings/iommu/iommu.txt
 */
static int snarf_iommu_devices(struct device *dev, void *ignored)
{
	struct iommu_debug_device *ddev;
	struct dentry *dir;

	if (!of_find_property(dev->of_node, "iommus", NULL))
		return 0;

	if (!of_device_is_compatible(dev->of_node, "iommu-debug-test"))
		return 0;

	/* Hold a reference count */
	if (!iommu_group_get(dev))
		return 0;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;
	mutex_init(&ddev->clk_lock);
	ddev->dev = dev;
	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
	if (!dir) {
		pr_err("Couldn't create iommu/tests/%s debugfs dir\n",
		       dev_name(dev));
		goto err;
	}

	if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
				 &iommu_debug_nr_iters_ops)) {
		pr_err("Couldn't create iommu/tests/%s/nr_iters debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
				 &iommu_debug_test_virt_addr_fops)) {
		pr_err("Couldn't create iommu/tests/%s/test_virt_addr debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_profiling_fops)) {
		pr_err("Couldn't create iommu/tests/%s/secure_profiling debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling_fast debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_profiling_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/profiling_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/functional_fast_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
				 &iommu_debug_functional_arm_dma_api_fops)) {
		pr_err("Couldn't create iommu/tests/%s/functional_arm_dma_api debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
				 &iommu_debug_dma_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
				 &iommu_debug_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
				 &iommu_debug_secure_attach_fops)) {
		pr_err("Couldn't create iommu/tests/%s/secure_attach debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
				 &iommu_debug_atos_fops)) {
		pr_err("Couldn't create iommu/tests/%s/atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
				 &iommu_debug_dma_atos_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_atos debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
				 &iommu_debug_map_fops)) {
		pr_err("Couldn't create iommu/tests/%s/map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_map", 0600, dir, ddev,
				 &iommu_debug_dma_map_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_map debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
				 &iommu_debug_unmap_fops)) {
		pr_err("Couldn't create iommu/tests/%s/unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
				 &iommu_debug_dma_unmap_fops)) {
		pr_err("Couldn't create iommu/tests/%s/dma_unmap debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("pte", 0600, dir, ddev,
				 &iommu_debug_pte_fops)) {
		pr_err("Couldn't create iommu/tests/%s/pte debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
				 &iommu_debug_config_clocks_fops)) {
		pr_err("Couldn't create iommu/tests/%s/config_clocks debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
				 &iommu_debug_trigger_fault_fops)) {
		pr_err("Couldn't create iommu/tests/%s/trigger-fault debugfs file\n",
		       dev_name(dev));
		goto err_rmdir;
	}

	list_add(&ddev->list, &iommu_debug_devices);
	return 0;

err_rmdir:
	debugfs_remove_recursive(dir);
err:
	kfree(ddev);
	return 0;
}
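
/*
 * Note that the debugfs failure paths above clean up and return 0 so
 * that bus_for_each_dev() keeps iterating: a failure to set up one
 * test device doesn't abort setup of the others. Only the allocation
 * failure returns an error and stops the walk.
 */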

static int iommu_debug_init_tests(void)
{
	debugfs_tests_dir = debugfs_create_dir("tests",
					       iommu_debugfs_top);
	if (!debugfs_tests_dir) {
		pr_err("Couldn't create iommu/tests debugfs directory\n");
		return -ENODEV;
	}

	test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
	if (!test_virt_addr)
		return -ENOMEM;

	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
				snarf_iommu_devices);
}

static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
}
#else
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif

/*
 * This isn't really a "driver", we just need something in the device tree
 * so that our tests can run without any client drivers, and our tests rely
 * on parsing the device tree for nodes with the `iommus' property.
 */
static int iommu_debug_pass(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id iommu_debug_of_match[] = {
	{ .compatible = "iommu-debug-test" },
	{ },
};

static struct platform_driver iommu_debug_driver = {
	.probe = iommu_debug_pass,
	.remove = iommu_debug_pass,
	.driver = {
		.name = "iommu-debug",
		.of_match_table = iommu_debug_of_match,
	},
};

static int iommu_debug_init(void)
{
	if (iommu_debug_init_tests())
		return -ENODEV;

	return platform_driver_register(&iommu_debug_driver);
}

static void iommu_debug_exit(void)
{
	platform_driver_unregister(&iommu_debug_driver);
	iommu_debug_destroy_tests();
}

module_init(iommu_debug_init);
module_exit(iommu_debug_exit);