io-pgtable-msm-secure.c

/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt)	"io-pgtable-msm-secure: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/scm.h>
#include <asm/cacheflush.h>

#include "io-pgtable.h"
#define IOMMU_SECURE_PTBL_SIZE		3
#define IOMMU_SECURE_PTBL_INIT		4
#define IOMMU_SECURE_MAP2_FLAT		0x12
#define IOMMU_SECURE_UNMAP2_FLAT	0x13
#define IOMMU_TLBINVAL_FLAG		0x00000001

#define io_pgtable_to_data(x)					\
	container_of((x), struct msm_secure_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)				\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)				\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

struct msm_secure_io_pgtable {
	struct io_pgtable iop;
	/* lock required while operating on page tables */
	struct mutex pgtbl_lock;
};
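
/*
 * msm_iommu_sec_pgtbl_init() - one-time setup of the TZ-managed page tables.
 *
 * Asks the secure world (IOMMU_SECURE_PTBL_SIZE) how much memory it needs,
 * allocates that memory without a kernel mapping, and hands it over with
 * IOMMU_SECURE_PTBL_INIT.  On success the allocation is intentionally not
 * freed: ownership of the buffer has passed to TZ.
 */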
int msm_iommu_sec_pgtbl_init(void)
{
	struct msm_scm_ptbl_init {
		unsigned int paddr;
		unsigned int size;
		unsigned int spare;
	} pinit = {0};
	int psize[2] = {0, 0};
	unsigned int spare = 0;
	int ret, ptbl_ret = 0;
	struct device dev = {0};
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs = 0;
	struct scm_desc desc = {0};

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
			       sizeof(spare), psize, sizeof(psize));
	} else {
		struct scm_desc desc = {0};

		desc.args[0] = spare;
		desc.arginfo = SCM_ARGS(1);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_PTBL_SIZE), &desc);
		psize[0] = desc.ret[0];
		psize[1] = desc.ret[1];
	}

	if (ret || psize[1]) {
		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
		if (!ret)
			ret = -EINVAL;
		goto fail;
	}

	/* Now allocate memory for the secure page tables */
	attrs = DMA_ATTR_NO_KERNEL_MAPPING;
	dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	arch_setup_dma_ops(&dev, 0, 0, NULL, 0);
	cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
		       __func__, psize[0]);
		ret = -ENOMEM;
		goto fail;
	}

	pinit.paddr = (unsigned int)paddr;
	/* paddr may be a physical address > 4GB */
	desc.args[0] = paddr;
	desc.args[1] = pinit.size = psize[0];
	desc.args[2] = pinit.spare;
	desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
			       sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_PTBL_INIT), &desc);
		ptbl_ret = desc.ret[0];
	}

	if (ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
		goto fail_mem;
	}
	if (ptbl_ret) {
		pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
		ret = -EINVAL;
		goto fail_mem;
	}

	return 0;

fail_mem:
	dma_free_attrs(&dev, psize[0], cpu_addr, paddr, attrs);
fail:
	return ret;
}
EXPORT_SYMBOL(msm_iommu_sec_pgtbl_init);
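
/*
 * Map a single physically contiguous, 1MB-aligned region.  The physical
 * address is passed to TZ as a one-entry "flat" list, which is cleaned out
 * of the cache first so the secure world reads the value from RAM.
 */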
static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
			  phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *flush_va, *flush_va_end;
	struct scm_desc desc = {0};
	int ret = -EINVAL;
	u32 resp = 0;

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(paddr, SZ_1M) ||
	    !IS_ALIGNED(size, SZ_1M))
		return -EINVAL;

	desc.args[0] = virt_to_phys(&paddr);
	desc.args[1] = 1;
	desc.args[2] = size;
	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[5] = iova;
	desc.args[6] = size;
	desc.args[7] = 0;

	flush_va = &paddr;
	flush_va_end = (void *)(((unsigned long)flush_va) +
				sizeof(phys_addr_t));

	mutex_lock(&data->pgtbl_lock);
	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
	dmac_clean_range(flush_va, flush_va_end);

	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL, SCM_VAL, SCM_VAL);
	if (is_scm_armv8()) {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];
	}
	mutex_unlock(&data->pgtbl_lock);

	if (ret || resp)
		return -EINVAL;
	return 0;
}
static dma_addr_t msm_secure_get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	dma_addr_t pa = sg_dma_address(sg);

	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}
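
/*
 * Map a scatterlist.  A physically contiguous list is passed to TZ as a
 * single flat entry; otherwise the list is flattened into an array of
 * 1MB chunk addresses.  Every chunk must be 1MB aligned.
 */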
static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents,
			     int iommu_prot, size_t *size)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = -EINVAL;
	struct scatterlist *tmp, *sgiter;
	dma_addr_t *pa_list = NULL;
	unsigned int cnt, offset = 0, chunk_offset = 0;
	dma_addr_t pa;
	void *flush_va, *flush_va_end;
	unsigned long len = 0;
	struct scm_desc desc = {0};
	int i;
	u32 resp = 0;

	for_each_sg(sg, tmp, nents, i)
		len += tmp->length;

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return -EINVAL;

	if (sg->length == len) {
		/* Physically contiguous: pass a single flat entry */
		cnt = 1;
		pa = msm_secure_get_phys_addr(sg);
		if (!IS_ALIGNED(pa, SZ_1M))
			return -EINVAL;

		desc.args[0] = virt_to_phys(&pa);
		desc.args[1] = cnt;
		desc.args[2] = len;
		flush_va = &pa;
	} else {
		sgiter = sg;
		if (!IS_ALIGNED(sgiter->length, SZ_1M))
			return -EINVAL;
		cnt = sg->length / SZ_1M;
		while ((sgiter = sg_next(sgiter))) {
			if (!IS_ALIGNED(sgiter->length, SZ_1M))
				return -EINVAL;
			cnt += sgiter->length / SZ_1M;
		}

		pa_list = kmalloc_array(cnt, sizeof(*pa_list), GFP_KERNEL);
		if (!pa_list)
			return -ENOMEM;

		/* Flatten the scatterlist into 1MB chunk addresses */
		sgiter = sg;
		cnt = 0;
		pa = msm_secure_get_phys_addr(sgiter);
		while (offset < len) {
			if (!IS_ALIGNED(pa, SZ_1M)) {
				kfree(pa_list);
				return -EINVAL;
			}

			pa_list[cnt] = pa + chunk_offset;
			chunk_offset += SZ_1M;
			offset += SZ_1M;
			cnt++;

			if (chunk_offset >= sgiter->length && offset < len) {
				chunk_offset = 0;
				sgiter = sg_next(sgiter);
				pa = msm_secure_get_phys_addr(sgiter);
			}
		}

		desc.args[0] = virt_to_phys(pa_list);
		desc.args[1] = cnt;
		desc.args[2] = SZ_1M;
		flush_va = pa_list;
	}

	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[5] = iova;
	desc.args[6] = len;
	desc.args[7] = 0;
	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL, SCM_VAL, SCM_VAL);

	/*
	 * Ensure that the buffer is in RAM by the time it gets to TZ
	 */
	flush_va_end = (void *)(((unsigned long)flush_va) +
				(cnt * sizeof(*pa_list)));

	mutex_lock(&data->pgtbl_lock);
	dmac_clean_range(flush_va, flush_va_end);
	if (is_scm_armv8()) {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_MAP2_FLAT), &desc);
		resp = desc.ret[0];
		if (ret || resp)
			ret = -EINVAL;
		else
			ret = len;
	}
	mutex_unlock(&data->pgtbl_lock);

	kfree(pa_list);
	return ret;
}
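
/*
 * Unmap a 1MB-aligned range and ask TZ to invalidate the TLB for it.
 * Returns the unmapped length on success, following the io-pgtable
 * unmap convention.
 */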
static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			       size_t len)
{
	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = -EINVAL;
	struct scm_desc desc = {0};

	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
		return ret;

	desc.args[0] = cfg->arm_msm_secure_cfg.sec_id;
	desc.args[1] = cfg->arm_msm_secure_cfg.cbndx;
	desc.args[2] = iova;
	desc.args[3] = len;
	desc.args[4] = IOMMU_TLBINVAL_FLAG;
	desc.arginfo = SCM_ARGS(5);

	mutex_lock(&data->pgtbl_lock);
	if (is_scm_armv8()) {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				IOMMU_SECURE_UNMAP2_FLAT), &desc);
		if (!ret)
			ret = len;
	}
	mutex_unlock(&data->pgtbl_lock);

	return ret;
}
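
/*
 * The page tables live in (and are walked by) the secure world, so the
 * non-secure side cannot perform an iova-to-phys translation.
 */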
static phys_addr_t msm_secure_iova_to_phys(struct io_pgtable_ops *ops,
					   unsigned long iova)
{
	return -EINVAL;
}
static struct msm_secure_io_pgtable *
msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
{
	struct msm_secure_io_pgtable *data;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= msm_secure_map,
		.map_sg		= msm_secure_map_sg,
		.unmap		= msm_secure_unmap,
		.iova_to_phys	= msm_secure_iova_to_phys,
	};
	mutex_init(&data->pgtbl_lock);

	return data;
}

static struct io_pgtable *
msm_secure_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct msm_secure_io_pgtable *data =
		msm_secure_alloc_pgtable_data(cfg);

	if (!data)
		return NULL;

	return &data->iop;
}

static void msm_secure_free_pgtable(struct io_pgtable *iop)
{
	struct msm_secure_io_pgtable *data = io_pgtable_to_data(iop);

	kfree(data);
}

struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns = {
	.alloc	= msm_secure_alloc_pgtable,
	.free	= msm_secure_free_pgtable,
};