io-pgtable.h 9.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290
  1. #ifndef __IO_PGTABLE_H
  2. #define __IO_PGTABLE_H
  3. #include <linux/bitops.h>
  4. #include <linux/scatterlist.h>
  5. #include <soc/qcom/msm_tz_smmu.h>
  6. /*
  7. * Public API for use by IOMMU drivers
  8. */
  9. enum io_pgtable_fmt {
  10. ARM_32_LPAE_S1,
  11. ARM_32_LPAE_S2,
  12. ARM_64_LPAE_S1,
  13. ARM_64_LPAE_S2,
  14. ARM_V7S,
  15. ARM_V8L_FAST,
  16. ARM_MSM_SECURE,
  17. IO_PGTABLE_NUM_FMTS,
  18. };
  19. /**
  20. * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
  21. *
  22. * @tlb_flush_all: Synchronously invalidate the entire TLB context.
  23. * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
  24. * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and
  25. * any corresponding page table updates are visible to the
  26. * IOMMU.
  27. * @alloc_pages_exact: Allocate page table memory (optional, defaults to
  28. * alloc_pages_exact)
  29. * @free_pages_exact: Free page table memory (optional, defaults to
  30. * free_pages_exact)
  31. *
  32. * Note that these can all be called in atomic context and must therefore
  33. * not block.
  34. */
  35. struct iommu_gather_ops {
  36. void (*tlb_flush_all)(void *cookie);
  37. void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
  38. bool leaf, void *cookie);
  39. void (*tlb_sync)(void *cookie);
  40. void *(*alloc_pages_exact)(void *cookie, size_t size, gfp_t gfp_mask);
  41. void (*free_pages_exact)(void *cookie, void *virt, size_t size);
  42. };
  43. /**
  44. * struct io_pgtable_cfg - Configuration data for a set of page tables.
  45. *
  46. * @quirks: A bitmap of hardware quirks that require some special
  47. * action by the low-level page table allocator.
  48. * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
  49. * tables.
  50. * @ias: Input address (iova) size, in bits.
  51. * @oas: Output address (paddr) size, in bits.
  52. * @tlb: TLB management callbacks for this set of tables.
  53. * @iommu_dev: The device representing the DMA configuration for the
  54. * page table walker.
  55. */
  56. struct io_pgtable_cfg {
  57. /*
  58. * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
  59. * stage 1 PTEs, for hardware which insists on validating them
  60. * even in non-secure state where they should normally be ignored.
  61. *
  62. * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
  63. * IOMMU_NOEXEC flags and map everything with full access, for
  64. * hardware which does not implement the permissions of a given
  65. * format, and/or requires some format-specific default value.
  66. *
  67. * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
  68. * (unmapped) entries but the hardware might do so anyway, perform
  69. * TLB maintenance when mapping as well as when unmapping.
  70. *
  71. * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
  72. * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
  73. * when the SoC is in "4GB mode" and they can only access the high
  74. * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
  75. *
  76. * IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT: Override the attributes
  77. * set in TCR for the page table walker. Use attributes specified
  78. * by the upstream hw instead.
  79. *
  80. * IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT: Set the page table as
  81. * coherent.
  82. *
  83. * IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE:
  84. * Having page tables which are non coherent, but cached in a
  85. * system cache requires SH=Non-Shareable. This applies to the
  86. * qsmmuv500 model. For data buffers SH=Non-Shareable is not
  87. * required.
  88. */
  89. #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
  90. #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
  91. #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
  92. #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
  93. #define IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT BIT(4)
  94. #define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT BIT(5)
  95. #define IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE BIT(6)
  96. unsigned long quirks;
  97. unsigned long pgsize_bitmap;
  98. unsigned int ias;
  99. unsigned int oas;
  100. const struct iommu_gather_ops *tlb;
  101. struct device *iommu_dev;
  102. /* Low-level data specific to the table format */
  103. union {
  104. struct {
  105. u64 ttbr[2];
  106. u64 tcr;
  107. u64 mair[2];
  108. } arm_lpae_s1_cfg;
  109. struct {
  110. u64 vttbr;
  111. u64 vtcr;
  112. } arm_lpae_s2_cfg;
  113. struct {
  114. u32 ttbr[2];
  115. u32 tcr;
  116. u32 nmrr;
  117. u32 prrr;
  118. } arm_v7s_cfg;
  119. struct {
  120. u64 ttbr[2];
  121. u64 tcr;
  122. u64 mair[2];
  123. void *pmds;
  124. } av8l_fast_cfg;
  125. struct {
  126. enum tz_smmu_device_id sec_id;
  127. int cbndx;
  128. } arm_msm_secure_cfg;
  129. };
  130. };
  131. /**
  132. * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
  133. *
  134. * @map: Map a physically contiguous memory region.
  135. * @map_sg: Map a scatterlist. Returns the number of bytes mapped,
  136. * or 0 on failure. The size parameter contains the size
  137. * of the partial mapping in case of failure.
  138. * @unmap: Unmap a physically contiguous memory region.
  139. * @iova_to_phys: Translate iova to physical address.
  140. *
  141. * These functions map directly onto the iommu_ops member functions with
  142. * the same names.
  143. */
  144. struct io_pgtable_ops {
  145. int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
  146. phys_addr_t paddr, size_t size, int prot);
  147. int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
  148. struct scatterlist *sg, unsigned int nents,
  149. int prot, size_t *size);
  150. size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
  151. size_t size);
  152. phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
  153. unsigned long iova);
  154. bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
  155. unsigned long iova);
  156. uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops,
  157. unsigned long iova);
  158. };
  159. /**
  160. * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
  161. *
  162. * @fmt: The page table format.
  163. * @cfg: The page table configuration. This will be modified to represent
  164. * the configuration actually provided by the allocator (e.g. the
  165. * pgsize_bitmap may be restricted).
  166. * @cookie: An opaque token provided by the IOMMU driver and passed back to
  167. * the callback routines in cfg->tlb.
  168. */
  169. struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
  170. struct io_pgtable_cfg *cfg,
  171. void *cookie);
  172. /**
  173. * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
  174. * *must* ensure that the page table is no longer
  175. * live, but the TLB can be dirty.
  176. *
  177. * @ops: The ops returned from alloc_io_pgtable_ops.
  178. */
  179. void free_io_pgtable_ops(struct io_pgtable_ops *ops);
  180. /*
  181. * Internal structures for page table allocator implementations.
  182. */
  183. /**
  184. * struct io_pgtable - Internal structure describing a set of page tables.
  185. *
  186. * @fmt: The page table format.
  187. * @cookie: An opaque token provided by the IOMMU driver and passed back to
  188. * any callback routines.
  189. * @tlb_sync_pending: Private flag for optimising out redundant syncs.
  190. * @cfg: A copy of the page table configuration.
  191. * @ops: The page table operations in use for this set of page tables.
  192. */
  193. struct io_pgtable {
  194. enum io_pgtable_fmt fmt;
  195. void *cookie;
  196. bool tlb_sync_pending;
  197. struct io_pgtable_cfg cfg;
  198. struct io_pgtable_ops ops;
  199. };
  200. #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
  201. static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
  202. {
  203. if (!iop->cfg.tlb)
  204. return;
  205. iop->cfg.tlb->tlb_flush_all(iop->cookie);
  206. iop->tlb_sync_pending = true;
  207. }
  208. static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
  209. unsigned long iova, size_t size, size_t granule, bool leaf)
  210. {
  211. if (!iop->cfg.tlb)
  212. return;
  213. iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
  214. iop->tlb_sync_pending = true;
  215. }
  216. static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
  217. {
  218. if (!iop->cfg.tlb)
  219. return;
  220. if (iop->tlb_sync_pending) {
  221. iop->cfg.tlb->tlb_sync(iop->cookie);
  222. iop->tlb_sync_pending = false;
  223. }
  224. }
  225. /**
  226. * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
  227. * particular format.
  228. *
  229. * @alloc: Allocate a set of page tables described by cfg.
  230. * @free: Free the page tables associated with iop.
  231. */
  232. struct io_pgtable_init_fns {
  233. struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
  234. void (*free)(struct io_pgtable *iop);
  235. };
  236. extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
  237. extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
  238. extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
  239. extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
  240. extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
  241. extern struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns;
  242. extern struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns;
  243. /**
  244. * io_pgtable_alloc_pages_exact:
  245. * allocate an exact number of physically-contiguous pages.
  246. * @size: the number of bytes to allocate
  247. * @gfp_mask: GFP flags for the allocation
  248. *
  249. * Like alloc_pages_exact(), but with some additional accounting for debug
  250. * purposes.
  251. */
  252. void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
  253. size_t size, gfp_t gfp_mask);
  254. /**
  255. * io_pgtable_free_pages_exact:
  256. * release memory allocated via io_pgtable_alloc_pages_exact()
  257. * @virt: the value returned by alloc_pages_exact.
  258. * @size: size of allocation, same value as passed to alloc_pages_exact().
  259. *
  260. * Like free_pages_exact(), but with some additional accounting for debug
  261. * purposes.
  262. */
  263. void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
  264. void *virt, size_t size);
  265. #endif /* __IO_PGTABLE_H */