/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/** MMU register offsets */
#define RK_MMU_DTE_ADDR         0x00    /* Directory table address */
#define RK_MMU_STATUS           0x04
#define RK_MMU_COMMAND          0x08
#define RK_MMU_PAGE_FAULT_ADDR  0x0C    /* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE     0x10    /* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT      0x14    /* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR        0x18    /* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK         0x1C    /* IRQ enable */
#define RK_MMU_INT_STATUS       0x20    /* IRQ status after masking */
#define RK_MMU_AUTO_GATING      0x24

#define DTE_ADDR_DUMMY          0xCAFEBABE
#define FORCE_RESET_TIMEOUT     100     /* ms */

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED            BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE         BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE              BIT(2)
#define RK_MMU_STATUS_IDLE                      BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY       BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE       BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE          BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0   /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1   /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2   /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3   /* Stop stall; re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4   /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5   /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6   /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT   0x01    /* page fault */
#define RK_MMU_IRQ_BUS_ERROR    0x02    /* bus read error */
#define RK_MMU_IRQ_MASK         (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES  1024
#define NUM_PT_ENTRIES  1024

#define SPAGE_ORDER     12
#define SPAGE_SIZE      (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP  0x007ff000

#define IOMMU_REG_POLL_COUNT_FAST       1000

struct rk_iommu_domain {
        struct list_head iommus;
        struct platform_device *pdev;
        u32 *dt; /* page directory table */
        dma_addr_t dt_dma;
        spinlock_t iommus_lock; /* lock for iommus list */
        spinlock_t dt_lock; /* lock for modifying page directory table */

        struct iommu_domain domain;
};

struct rk_iommu {
        struct device *dev;
        void __iomem **bases;
        int num_mmu;
        int irq;
        struct list_head node; /* entry in rk_iommu_domain.iommus */
        struct iommu_domain *domain; /* domain to which iommu is attached */
};
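
/*
 * Flush CPU writes to a range of page table entries out to memory so the
 * IOMMU sees up-to-date tables: 'dma' is the DMA address of the first entry
 * and 'count' is the number of consecutive u32 entries to sync.
 */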
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
                                  unsigned int count)
{
        size_t size = count * sizeof(u32); /* count of u32 entry */

        dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct rk_iommu_domain, domain);
}

/**
 * Inspired by _wait_for in intel_drv.h
 * This is NOT safe for use in interrupt context.
 *
 * Note that it's important that we check the condition again after having
 * timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define rk_wait_for(COND, MS) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;   \
        int ret__ = 0;                                                  \
        while (!(COND)) {                                               \
                if (time_after(jiffies, timeout__)) {                   \
                        ret__ = (COND) ? 0 : -ETIMEDOUT;                \
                        break;                                          \
                }                                                       \
                usleep_range(50, 100);                                  \
        }                                                               \
        ret__;                                                          \
})

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always starts on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
        return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
        return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
        return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
        return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
        return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
        u32 flags = 0;

        flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
        flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
        page &= RK_PTE_PAGE_ADDRESS_MASK;
        return page | flags | RK_PTE_PAGE_VALID;
}
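
/*
 * For example, rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE) yields
 * 0x12345007: page address 0x12345000, readable (bit 1), writable (bit 2)
 * and valid (bit 0), with all cache-override flags left at 0.
 */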

static u32 rk_mk_pte_invalid(u32 pte)
{
        return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
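
/*
 * For example, iova 0x00401008 decodes to DTE index 0x001 (bits 31:22),
 * PTE index 0x001 (bits 21:12) and page offset 0x008 (bits 11:0).
 */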

#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
        return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
        writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
        writel(command, base + RK_MMU_COMMAND);
}
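
/*
 * Invalidate the IOTLB entry for each 4 KiB page in [iova, iova + size) on
 * every MMU instance belonging to this iommu.
 */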
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
                               size_t size)
{
        int i;
        dma_addr_t iova_end = iova + size;
        /*
         * TODO(djkurtz): Figure out when it is more efficient to shootdown the
         * entire iotlb rather than iterate over individual iovas.
         */
        for (i = 0; i < iommu->num_mmu; i++) {
                dma_addr_t cur;

                /* Walk the full iova range for each MMU instance */
                for (cur = iova; cur < iova_end; cur += SPAGE_SIZE)
                        rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, cur);
        }
}
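
/*
 * The two status helpers below report a bit as set only if it is set in the
 * RK_MMU_STATUS register of every MMU instance of this iommu.
 */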
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
        bool active = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_STALL_ACTIVE);

        return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
        bool enable = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_PAGING_ENABLED);

        return enable;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
        int ret, i;

        if (rk_iommu_is_stall_active(iommu))
                return 0;

        /* Stall can only be enabled if paging is enabled */
        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

        ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
        int ret, i;

        if (!rk_iommu_is_stall_active(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

        ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
        int ret, i;

        if (rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

        ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
        int ret, i;

        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

        ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
        int ret, i;
        u32 dte_addr;

        /*
         * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
         * and verifying that upper 5 nybbles are read back.
         */
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

                dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
                if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
                        dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
                        return -EFAULT;
                }
        }

        rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

        for (i = 0; i < iommu->num_mmu; i++) {
                ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
                                  FORCE_RESET_TIMEOUT);
                if (ret) {
                        dev_err(iommu->dev, "FORCE_RESET command timed out\n");
                        return ret;
                }
        }

        return 0;
}
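
/*
 * On a page fault, walk the page tables from the CPU side (via phys_to_virt)
 * for MMU instance 'index' and print the DTE, PTE and page address that the
 * faulting iova resolves to, to help diagnose the fault.
 */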
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
        void __iomem *base = iommu->bases[index];
        u32 dte_index, pte_index, page_offset;
        u32 mmu_dte_addr;
        phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
        u32 *dte_addr;
        u32 dte;
        phys_addr_t pte_addr_phys = 0;
        u32 *pte_addr = NULL;
        u32 pte = 0;
        phys_addr_t page_addr_phys = 0;
        u32 page_flags = 0;

        dte_index = rk_iova_dte_index(iova);
        pte_index = rk_iova_pte_index(iova);
        page_offset = rk_iova_page_offset(iova);

        mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
        mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

        dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
        dte_addr = phys_to_virt(dte_addr_phys);
        dte = *dte_addr;

        if (!rk_dte_is_pt_valid(dte))
                goto print_it;

        pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
        pte_addr = phys_to_virt(pte_addr_phys);
        pte = *pte_addr;

        if (!rk_pte_is_page_valid(pte))
                goto print_it;

        page_addr_phys = rk_pte_page_address(pte) + page_offset;
        page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
        dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
                &iova, dte_index, pte_index, page_offset);
        dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
                &mmu_dte_addr_phys, &dte_addr_phys, dte,
                rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
                rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}
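
/*
 * Shared interrupt handler: for each MMU instance with a pending interrupt,
 * log page faults (reporting them to any handler installed on the attached
 * domain), zap the IOTLB, acknowledge the fault and clear the interrupt.
 */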
static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
        struct rk_iommu *iommu = dev_id;
        u32 status;
        u32 int_status;
        dma_addr_t iova;
        irqreturn_t ret = IRQ_NONE;
        int i;

        for (i = 0; i < iommu->num_mmu; i++) {
                int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
                if (int_status == 0)
                        continue;

                ret = IRQ_HANDLED;
                iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

                if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
                        int flags;

                        status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
                        flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
                                IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

                        dev_err(iommu->dev, "Page fault at %pad of type %s\n",
                                &iova,
                                (flags == IOMMU_FAULT_WRITE) ? "write" : "read");

                        log_iova(iommu, i, iova);

                        /*
                         * Report page fault to any installed handlers.
                         * Ignore the return code, though, since we always zap cache
                         * and clear the page fault anyway.
                         */
                        if (iommu->domain)
                                report_iommu_fault(iommu->domain, iommu->dev, iova,
                                                   flags);
                        else
                                dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
                }

                if (int_status & RK_MMU_IRQ_BUS_ERROR)
                        dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

                if (int_status & ~RK_MMU_IRQ_MASK)
                        dev_err(iommu->dev, "unexpected int_status: %#08x\n",
                                int_status);

                rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
        }

        return ret;
}
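
/*
 * Translate an iova to a physical address by walking the domain's DT/PT in
 * CPU memory under dt_lock; returns 0 if the iova is not currently mapped.
 */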
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
                                         dma_addr_t iova)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        phys_addr_t pt_phys, phys = 0;
        u32 dte, pte;
        u32 *page_table;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        if (!rk_dte_is_pt_valid(dte))
                goto out;

        pt_phys = rk_dte_pt_address(dte);
        page_table = (u32 *)phys_to_virt(pt_phys);
        pte = page_table[rk_iova_pte_index(iova)];
        if (!rk_pte_is_page_valid(pte))
                goto out;

        phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
                              dma_addr_t iova, size_t size)
{
        struct list_head *pos;
        unsigned long flags;

        /* shootdown these iova from all iommus using this domain */
        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_for_each(pos, &rk_domain->iommus) {
                struct rk_iommu *iommu;
                iommu = list_entry(pos, struct rk_iommu, node);
                rk_iommu_zap_lines(iommu, iova, size);
        }
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
                                         dma_addr_t iova, size_t size)
{
        rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
        if (size > SPAGE_SIZE)
                rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
                                  SPAGE_SIZE);
}
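
/*
 * Return the CPU virtual address of the page table that covers 'iova',
 * allocating a new zeroed page table, mapping it for DMA and installing its
 * DTE if one is not already present. Must be called with dt_lock held.
 */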
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
                                  dma_addr_t iova)
{
        struct device *dev = &rk_domain->pdev->dev;
        u32 *page_table, *dte_addr;
        u32 dte_index, dte;
        phys_addr_t pt_phys;
        dma_addr_t pt_dma;

        assert_spin_locked(&rk_domain->dt_lock);

        dte_index = rk_iova_dte_index(iova);
        dte_addr = &rk_domain->dt[dte_index];
        dte = *dte_addr;
        if (rk_dte_is_pt_valid(dte))
                goto done;

        page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
        if (!page_table)
                return ERR_PTR(-ENOMEM);

        pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pt_dma)) {
                dev_err(dev, "DMA mapping error while allocating page table\n");
                free_page((unsigned long)page_table);
                return ERR_PTR(-ENOMEM);
        }

        dte = rk_mk_dte(pt_dma);
        *dte_addr = dte;

        rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
        rk_table_flush(rk_domain,
                       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
        pt_phys = rk_dte_pt_address(dte);
        return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
                                  u32 *pte_addr, dma_addr_t pte_dma,
                                  size_t size)
{
        unsigned int pte_count;
        unsigned int pte_total = size / SPAGE_SIZE;

        assert_spin_locked(&rk_domain->dt_lock);

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                u32 pte = pte_addr[pte_count];
                if (!rk_pte_is_page_valid(pte))
                        break;

                pte_addr[pte_count] = rk_mk_pte_invalid(pte);
        }

        rk_table_flush(rk_domain, pte_dma, pte_count);

        return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
                             dma_addr_t pte_dma, dma_addr_t iova,
                             phys_addr_t paddr, size_t size, int prot)
{
        unsigned int pte_count;
        unsigned int pte_total = size / SPAGE_SIZE;
        phys_addr_t page_phys;

        assert_spin_locked(&rk_domain->dt_lock);

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                u32 pte = pte_addr[pte_count];

                if (rk_pte_is_page_valid(pte))
                        goto unwind;

                pte_addr[pte_count] = rk_mk_pte(paddr, prot);

                paddr += SPAGE_SIZE;
        }

        rk_table_flush(rk_domain, pte_dma, pte_total);

        /*
         * Zap the first and last iova to evict from iotlb any previously
         * mapped cachelines holding stale values for its dte and pte.
         * We only zap the first and last iova, since only they could have
         * dte or pte shared with an existing mapping.
         */
        rk_iommu_zap_iova_first_last(rk_domain, iova, size);

        return 0;
unwind:
        /* Unmap the range of iovas that we just mapped */
        rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
                            pte_count * SPAGE_SIZE);

        iova += pte_count * SPAGE_SIZE;
        page_phys = rk_pte_page_address(pte_addr[pte_count]);
        pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
               &iova, &page_phys, &paddr, prot);

        return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        u32 *page_table, *pte_addr;
        u32 dte, pte_index;
        int ret;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        /*
         * pgsize_bitmap specifies iova sizes that fit in one page table
         * (1024 4-KiB pages = 4 MiB).
         * So, size will always be 4096 <= size <= 4194304.
         * Since iommu_map() guarantees that both iova and size will be
         * aligned, we will always only be mapping from a single dte here.
         */
        page_table = rk_dte_get_page_table(rk_domain, iova);
        if (IS_ERR(page_table)) {
                spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
                return PTR_ERR(page_table);
        }

        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        pte_index = rk_iova_pte_index(iova);
        pte_addr = &page_table[pte_index];
        pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
        ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
                                paddr, size, prot);

        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
                             size_t size)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        phys_addr_t pt_phys;
        u32 dte;
        u32 *pte_addr;
        size_t unmap_size;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        /*
         * pgsize_bitmap specifies iova sizes that fit in one page table
         * (1024 4-KiB pages = 4 MiB).
         * So, size will always be 4096 <= size <= 4194304.
         * Since iommu_unmap() guarantees that both iova and size will be
         * aligned, we will always only be unmapping from a single dte here.
         */
        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        /* Just return 0 if iova is unmapped */
        if (!rk_dte_is_pt_valid(dte)) {
                spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
                return 0;
        }

        pt_phys = rk_dte_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
        pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        /* Shootdown iotlb entries for iova range that was just unmapped */
        rk_iommu_zap_iova(rk_domain, iova, unmap_size);

        return unmap_size;
}
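
/*
 * Look up the rk_iommu instance serving 'dev': the iommu's struct device is
 * stored as the iommudata of the device's iommu group, and its drvdata holds
 * the rk_iommu. Returns NULL for devices without an iommu group.
 */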
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
        struct iommu_group *group;
        struct device *iommu_dev;
        struct rk_iommu *rk_iommu;

        group = iommu_group_get(dev);
        if (!group)
                return NULL;
        iommu_dev = iommu_group_get_iommudata(group);
        rk_iommu = dev_get_drvdata(iommu_dev);
        iommu_group_put(group);

        return rk_iommu;
}
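
/*
 * Attach sequence: stall the MMU, force-reset it, request the shared IRQ,
 * then program the domain's DT address, zap the IOTLB and unmask interrupts
 * on each MMU instance, re-enable paging, add the iommu to the domain's
 * list and finally release the stall.
 */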
static int rk_iommu_attach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        int ret, i;

        /*
         * Allow 'virtual devices' (e.g., drm) to attach to domain.
         * Such a device does not belong to an iommu group.
         */
        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return 0;

        ret = rk_iommu_enable_stall(iommu);
        if (ret)
                return ret;

        ret = rk_iommu_force_reset(iommu);
        if (ret)
                return ret;

        iommu->domain = domain;

        ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq,
                               IRQF_SHARED, dev_name(dev), iommu);
        if (ret)
                return ret;

        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
                               rk_domain->dt_dma);
                rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
        }

        ret = rk_iommu_enable_paging(iommu);
        if (ret)
                return ret;

        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_add_tail(&iommu->node, &rk_domain->iommus);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

        dev_dbg(dev, "Attached to iommu domain\n");

        rk_iommu_disable_stall(iommu);

        return 0;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        int i;

        /* Allow 'virtual devices' (eg drm) to detach from domain */
        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return;

        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_del_init(&iommu->node);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

        /* Ignore error while disabling, just keep going */
        rk_iommu_enable_stall(iommu);
        rk_iommu_disable_paging(iommu);
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
        }
        rk_iommu_disable_stall(iommu);

        devm_free_irq(iommu->dev, iommu->irq, iommu);

        iommu->domain = NULL;

        dev_dbg(dev, "Detached from iommu domain\n");
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
        struct rk_iommu_domain *rk_domain;
        struct platform_device *pdev;
        struct device *iommu_dev;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
                return NULL;

        /*
         * Register a pdev per domain, so the DMA API has a struct device to
         * work with even when a virtual master has no iommu slave device.
         */
        pdev = platform_device_register_simple("rk_iommu_domain",
                                               PLATFORM_DEVID_AUTO, NULL, 0);
        if (IS_ERR(pdev))
                return NULL;

        rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
        if (!rk_domain)
                goto err_unreg_pdev;

        rk_domain->pdev = pdev;

        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&rk_domain->domain))
                goto err_unreg_pdev;

        /*
         * rk32xx iommus use a 2 level pagetable.
         * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
         * Allocate one 4 KiB page for each table.
         */
        rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
        if (!rk_domain->dt)
                goto err_put_cookie;

        iommu_dev = &pdev->dev;
        rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
                                           SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
                dev_err(iommu_dev, "DMA map error for DT\n");
                goto err_free_dt;
        }

        rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

        spin_lock_init(&rk_domain->iommus_lock);
        spin_lock_init(&rk_domain->dt_lock);
        INIT_LIST_HEAD(&rk_domain->iommus);

        rk_domain->domain.geometry.aperture_start = 0;
        rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
        rk_domain->domain.geometry.force_aperture = true;

        return &rk_domain->domain;

err_free_dt:
        free_page((unsigned long)rk_domain->dt);
err_put_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&rk_domain->domain);
err_unreg_pdev:
        platform_device_unregister(pdev);

        return NULL;
}
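
/*
 * Free a domain: every valid DTE's page table is DMA-unmapped and freed,
 * then the directory table itself, the DMA cookie (for DMA domains) and the
 * per-domain platform device are released.
 */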
static void rk_iommu_domain_free(struct iommu_domain *domain)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        int i;

        WARN_ON(!list_empty(&rk_domain->iommus));

        for (i = 0; i < NUM_DT_ENTRIES; i++) {
                u32 dte = rk_domain->dt[i];
                if (rk_dte_is_pt_valid(dte)) {
                        phys_addr_t pt_phys = rk_dte_pt_address(dte);
                        u32 *page_table = phys_to_virt(pt_phys);
                        dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
                                         SPAGE_SIZE, DMA_TO_DEVICE);
                        free_page((unsigned long)page_table);
                }
        }

        dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
                         SPAGE_SIZE, DMA_TO_DEVICE);
        free_page((unsigned long)rk_domain->dt);

        if (domain->type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&rk_domain->domain);

        platform_device_unregister(rk_domain->pdev);
}

static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
        struct device_node *np = dev->of_node;
        int ret;

        /*
         * An iommu master has an iommus property containing a list of phandles
         * to iommu nodes, each with an #iommu-cells property with value 0.
         */
        ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
        return (ret > 0);
}

static int rk_iommu_group_set_iommudata(struct iommu_group *group,
                                        struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct platform_device *pd;
        int ret;
        struct of_phandle_args args;

        /*
         * An iommu master has an iommus property containing a list of phandles
         * to iommu nodes, each with an #iommu-cells property with value 0.
         */
        ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
                                         &args);
        if (ret) {
                dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
                        np->full_name, ret);
                return ret;
        }
        if (args.args_count != 0) {
                dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
                        args.np->full_name, args.args_count);
                return -EINVAL;
        }

        pd = of_find_device_by_node(args.np);
        of_node_put(args.np);
        if (!pd) {
                dev_err(dev, "iommu %s not found\n", args.np->full_name);
                return -EPROBE_DEFER;
        }

        /* TODO(djkurtz): handle multiple slave iommus for a single master */
        iommu_group_set_iommudata(group, &pd->dev, NULL);

        return 0;
}

static int rk_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;
        int ret;

        if (!rk_iommu_is_dev_iommu_master(dev))
                return -ENODEV;

        group = iommu_group_get(dev);
        if (!group) {
                group = iommu_group_alloc();
                if (IS_ERR(group)) {
                        dev_err(dev, "Failed to allocate IOMMU group\n");
                        return PTR_ERR(group);
                }
        }

        ret = iommu_group_add_device(group, dev);
        if (ret)
                goto err_put_group;

        ret = rk_iommu_group_set_iommudata(group, dev);
        if (ret)
                goto err_remove_device;

        iommu_group_put(group);

        return 0;

err_remove_device:
        iommu_group_remove_device(dev);
err_put_group:
        iommu_group_put(group);
        return ret;
}

static void rk_iommu_remove_device(struct device *dev)
{
        if (!rk_iommu_is_dev_iommu_master(dev))
                return;

        iommu_group_remove_device(dev);
}

static const struct iommu_ops rk_iommu_ops = {
        .domain_alloc = rk_iommu_domain_alloc,
        .domain_free = rk_iommu_domain_free,
        .attach_dev = rk_iommu_attach_device,
        .detach_dev = rk_iommu_detach_device,
        .map = rk_iommu_map,
        .unmap = rk_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .add_device = rk_iommu_add_device,
        .remove_device = rk_iommu_remove_device,
        .iova_to_phys = rk_iommu_iova_to_phys,
        .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};

static int rk_iommu_domain_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
        if (!dev->dma_parms)
                return -ENOMEM;

        /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
        arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);

        dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
        dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));

        return 0;
}

static struct platform_driver rk_iommu_domain_driver = {
        .probe = rk_iommu_domain_probe,
        .driver = {
                   .name = "rk_iommu_domain",
        },
};
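
/*
 * Probe one iommu node: map every MEM resource as an MMU register block
 * (an iommu may expose more than one MMU instance) and look up its interrupt.
 */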
static int rk_iommu_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rk_iommu *iommu;
        struct resource *res;
        int num_res = pdev->num_resources;
        int i;

        iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        platform_set_drvdata(pdev, iommu);
        iommu->dev = dev;
        iommu->num_mmu = 0;
        iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
                                    GFP_KERNEL);
        if (!iommu->bases)
                return -ENOMEM;

        for (i = 0; i < num_res; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res)
                        continue;
                iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(iommu->bases[i]))
                        continue;
                iommu->num_mmu++;
        }
        if (iommu->num_mmu == 0)
                return PTR_ERR(iommu->bases[0]);

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
                return -ENXIO;
        }

        return 0;
}

static int rk_iommu_remove(struct platform_device *pdev)
{
        return 0;
}

static const struct of_device_id rk_iommu_dt_ids[] = {
        { .compatible = "rockchip,iommu" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
        .probe = rk_iommu_probe,
        .remove = rk_iommu_remove,
        .driver = {
                   .name = "rk_iommu",
                   .of_match_table = rk_iommu_dt_ids,
        },
};
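
/*
 * Only hook the platform bus and register the two drivers when a matching
 * "rockchip,iommu" node is actually present in the device tree.
 */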
static int __init rk_iommu_init(void)
{
        struct device_node *np;
        int ret;

        np = of_find_matching_node(NULL, rk_iommu_dt_ids);
        if (!np)
                return 0;

        of_node_put(np);

        ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
        if (ret)
                return ret;

        ret = platform_driver_register(&rk_iommu_domain_driver);
        if (ret)
                return ret;

        ret = platform_driver_register(&rk_iommu_driver);
        if (ret)
                platform_driver_unregister(&rk_iommu_domain_driver);
        return ret;
}

static void __exit rk_iommu_exit(void)
{
        platform_driver_unregister(&rk_iommu_driver);
        platform_driver_unregister(&rk_iommu_domain_driver);
}

subsys_initcall(rk_iommu_init);
module_exit(rk_iommu_exit);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <[email protected]> and Daniel Kurtz <[email protected]>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");