qcedev_smmu.c

/* Qti (or) Qualcomm Technologies Inc CE device driver.
 *
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <asm/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/qcedev.h>
#include "qcedevi.h"
#include "qcedev_smmu.h"
#include "soc/qcom/secure_buffer.h"

static bool compare_ion_buffers(struct qcedev_mem_client *mem_client,
				struct ion_handle *hndl, int fd);

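/*
 * Create an ARM IOMMU mapping for the context bank's virtual address range,
 * program the secure VMID if the bank is marked secure, and attach the
 * device to the mapping.
 */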
static int qcedev_setup_context_bank(struct context_bank_info *cb,
				     struct device *dev)
{
	int rc = 0;
	int secure_vmid = VMID_INVAL;
	struct bus_type *bus;

	if (!dev || !cb) {
		pr_err("%s err: invalid input params\n", __func__);
		return -EINVAL;
	}
	cb->dev = dev;

	bus = cb->dev->bus;
	if (IS_ERR_OR_NULL(bus)) {
		pr_err("%s err: failed to get bus type\n", __func__);
		rc = PTR_ERR(bus) ?: -ENODEV;
		goto remove_cb;
	}

	cb->mapping = arm_iommu_create_mapping(bus, cb->start_addr, cb->size);
	if (IS_ERR_OR_NULL(cb->mapping)) {
		pr_err("%s err: failed to create mapping\n", __func__);
		rc = PTR_ERR(cb->mapping) ?: -ENODEV;
		goto remove_cb;
	}

	if (cb->is_secure) {
		/* Hardcoded since we only have this vmid. */
		secure_vmid = VMID_CP_BITSTREAM;
		rc = iommu_domain_set_attr(cb->mapping->domain,
					   DOMAIN_ATTR_SECURE_VMID,
					   &secure_vmid);
		if (rc) {
			pr_err("%s err: programming secure vmid failed %s %d\n",
			       __func__, dev_name(dev), rc);
			goto release_mapping;
		}
	}

	rc = arm_iommu_attach_device(cb->dev, cb->mapping);
	if (rc) {
		pr_err("%s err: Failed to attach %s - %d\n",
		       __func__, dev_name(dev), rc);
		goto release_mapping;
	}

	pr_info("%s Attached %s and created mapping\n",
		__func__, dev_name(dev));
	pr_info("%s Context Bank name:%s, is_secure:%d, start_addr:%#x\n",
		__func__, cb->name, cb->is_secure, cb->start_addr);
	pr_info("%s size:%#x, dev:%pK, mapping:%pK\n", __func__, cb->size,
		cb->dev, cb->mapping);

	return rc;

release_mapping:
	arm_iommu_release_mapping(cb->mapping);
remove_cb:
	return rc;
}

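/*
 * Parse one context bank node from device tree: read its label, virtual
 * address range and secure flag, add it to the driver's list of context
 * banks, and set up its IOMMU mapping.
 */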
int qcedev_parse_context_bank(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	struct context_bank_info *cb = NULL;
	struct device_node *np = NULL;
	int rc = 0;

	if (!pdev) {
		pr_err("%s err: invalid platform device\n", __func__);
		return -EINVAL;
	}
	if (!pdev->dev.parent) {
		pr_err("%s err: failed to find a parent for %s\n",
		       __func__, dev_name(&pdev->dev));
		return -EINVAL;
	}

	podev = dev_get_drvdata(pdev->dev.parent);
	np = pdev->dev.of_node;

	cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		pr_err("%s ERROR = Failed to allocate cb\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&cb->list);
	list_add_tail(&cb->list, &podev->context_banks);

	rc = of_property_read_string(np, "label", &cb->name);
	if (rc)
		pr_debug("%s ERROR = Unable to read label\n", __func__);

	rc = of_property_read_u32(np, "virtual-addr", &cb->start_addr);
	if (rc) {
		pr_err("%s err: cannot read virtual region addr %d\n",
		       __func__, rc);
		goto err_setup_cb;
	}

	rc = of_property_read_u32(np, "virtual-size", &cb->size);
	if (rc) {
		pr_err("%s err: cannot read virtual region size %d\n",
		       __func__, rc);
		goto err_setup_cb;
	}

	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");

	rc = qcedev_setup_context_bank(cb, &pdev->dev);
	if (rc) {
		pr_err("%s err: cannot setup context bank %d\n", __func__, rc);
		goto err_setup_cb;
	}

	return 0;

err_setup_cb:
	/* Unlink the entry before freeing it to avoid a use-after-free */
	list_del(&cb->list);
	devm_kfree(&pdev->dev, cb);
	return rc;
}

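/* Create a memory client of the requested type (only ION is supported). */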
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype)
{
	struct qcedev_mem_client *mem_client = NULL;
	struct ion_client *clnt = NULL;

	switch (mtype) {
	case MEM_ION:
		clnt = msm_ion_client_create("qcedev_client");
		if (!clnt)
			pr_err("%s: err: failed to allocate ion client\n",
			       __func__);
		break;
	default:
		pr_err("%s: err: Mem type not supported\n", __func__);
	}

	if (clnt) {
		mem_client = kzalloc(sizeof(*mem_client), GFP_KERNEL);
		if (!mem_client)
			goto err;
		mem_client->mtype = mtype;
		mem_client->client = clnt;
	}

	return mem_client;
err:
	if (clnt)
		ion_client_destroy(clnt);
	return NULL;
}

void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client)
{
	if (mem_client && mem_client->client)
		ion_client_destroy(mem_client->client);

	kfree(mem_client);
}

static bool is_iommu_present(struct qcedev_handle *qce_hndl)
{
	return !list_empty(&qce_hndl->cntl->context_banks);
}

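/* Return the first context bank whose security setting matches the request. */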
static struct context_bank_info *get_context_bank(
		struct qcedev_handle *qce_hndl, bool is_secure)
{
	struct qcedev_control *podev = qce_hndl->cntl;
	struct context_bank_info *cb = NULL, *match = NULL;

	list_for_each_entry(cb, &podev->context_banks, list) {
		if (cb->is_secure == is_secure) {
			match = cb;
			break;
		}
	}
	return match;
}

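/*
 * Import the dma-buf behind @fd, attach it to the context bank device and
 * map its scatterlist into the SMMU. On success the resulting IOVA, mapped
 * size and mapping handles are stored in @binfo.
 */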
static int ion_map_buffer(struct qcedev_handle *qce_hndl,
			  struct qcedev_mem_client *mem_client, int fd,
			  unsigned int fd_size,
			  struct qcedev_reg_buf_info *binfo)
{
	struct ion_client *clnt = mem_client->client;
	struct ion_handle *hndl = NULL;
	unsigned long ion_flags = 0;
	int rc = 0;
	struct dma_buf *buf = NULL;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(buf))
		return -EINVAL;

	hndl = ion_import_dma_buf(clnt, buf);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("%s: err: invalid ion_handle\n", __func__);
		rc = -ENOMEM;
		goto import_buf_err;
	}

	rc = ion_handle_get_flags(clnt, hndl, &ion_flags);
	if (rc) {
		pr_err("%s: err: failed to get ion flags: %d\n", __func__, rc);
		goto map_err;
	}

	if (is_iommu_present(qce_hndl)) {
		cb = get_context_bank(qce_hndl, ion_flags & ION_FLAG_SECURE);
		if (!cb) {
			pr_err("%s: err: failed to get context bank info\n",
			       __func__);
			rc = -EIO;
			goto map_err;
		}

		/* Prepare a dma buf for dma on the given device */
		attach = dma_buf_attach(buf, cb->dev);
		if (IS_ERR_OR_NULL(attach)) {
			rc = PTR_ERR(attach) ?: -ENOMEM;
			pr_err("%s: err: failed to attach dmabuf\n", __func__);
			goto map_err;
		}

		/* Get the scatterlist for the given attachment */
		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(table)) {
			rc = PTR_ERR(table) ?: -ENOMEM;
			pr_err("%s: err: failed to map table\n", __func__);
			goto map_table_err;
		}

		/* Map a scatterlist into the SMMU */
		rc = msm_dma_map_sg_lazy(cb->dev, table->sgl, table->nents,
					 DMA_BIDIRECTIONAL, buf);
		if (rc != table->nents) {
			pr_err("%s: err: mapping failed with rc(%d), expected rc(%d)\n",
			       __func__, rc, table->nents);
			rc = -ENOMEM;
			goto map_sg_err;
		}

		if (table->sgl) {
			binfo->ion_buf.iova = table->sgl->dma_address;
			binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl);
			if (binfo->ion_buf.mapped_buf_size < fd_size) {
				pr_err("%s: err: mapping failed, size mismatch\n",
				       __func__);
				rc = -ENOMEM;
				goto map_sg_err;
			}
		} else {
			pr_err("%s: err: sg list is NULL\n", __func__);
			rc = -ENOMEM;
			goto map_sg_err;
		}

		binfo->ion_buf.mapping_info.dev = cb->dev;
		binfo->ion_buf.mapping_info.mapping = cb->mapping;
		binfo->ion_buf.mapping_info.table = table;
		binfo->ion_buf.mapping_info.attach = attach;
		binfo->ion_buf.mapping_info.buf = buf;
		binfo->ion_buf.hndl = hndl;
	} else {
		pr_err("%s: err: smmu not enabled\n", __func__);
		rc = -EIO;
		goto map_err;
	}

	return 0;

map_sg_err:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
map_table_err:
	dma_buf_detach(buf, attach);
map_err:
	if (hndl)
		ion_free(clnt, hndl);
import_buf_err:
	dma_buf_put(buf);
	return rc;
}

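/*
 * Tear down the SMMU mapping and release the references taken in
 * ion_map_buffer().
 */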
static int ion_unmap_buffer(struct qcedev_handle *qce_hndl,
			    struct qcedev_reg_buf_info *binfo)
{
	struct dma_mapping_info *mapping_info = &binfo->ion_buf.mapping_info;
	struct qcedev_mem_client *mem_client = qce_hndl->cntl->mem_client;

	if (is_iommu_present(qce_hndl)) {
		msm_dma_unmap_sg(mapping_info->dev, mapping_info->table->sgl,
				 mapping_info->table->nents, DMA_BIDIRECTIONAL,
				 mapping_info->buf);
		dma_buf_unmap_attachment(mapping_info->attach,
					 mapping_info->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(mapping_info->buf, mapping_info->attach);
		dma_buf_put(mapping_info->buf);

		if (binfo->ion_buf.hndl)
			ion_free(mem_client->client, binfo->ion_buf.hndl);
	}
	return 0;
}

static int qcedev_map_buffer(struct qcedev_handle *qce_hndl,
			     struct qcedev_mem_client *mem_client, int fd,
			     unsigned int fd_size,
			     struct qcedev_reg_buf_info *binfo)
{
	int rc = 0;

	switch (mem_client->mtype) {
	case MEM_ION:
		rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo);
		break;
	default:
		pr_err("%s: err: Mem type not supported\n", __func__);
		break;
	}

	if (rc)
		pr_err("%s: err: failed to map buffer\n", __func__);

	return rc;
}

static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl,
			       struct qcedev_mem_client *mem_client,
			       struct qcedev_reg_buf_info *binfo)
{
	int rc = 0;

	switch (mem_client->mtype) {
	case MEM_ION:
		rc = ion_unmap_buffer(qce_hndl, binfo);
		break;
	default:
		pr_err("%s: err: Mem type not supported\n", __func__);
		break;
	}

	if (rc)
		pr_err("%s: err: failed to unmap buffer\n", __func__);

	return rc;
}

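/* Check whether @fd refers to the same ION buffer as @hndl. */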
static bool compare_ion_buffers(struct qcedev_mem_client *mem_client,
				struct ion_handle *hndl, int fd)
{
	bool match = false;
	struct ion_handle *fd_hndl = NULL;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dma_buf))
		return false;

	fd_hndl = ion_import_dma_buf(mem_client->client, dma_buf);
	if (IS_ERR_OR_NULL(fd_hndl)) {
		match = false;
		goto err_exit;
	}

	match = (fd_hndl == hndl);

	if (fd_hndl)
		ion_free(mem_client->client, fd_hndl);

err_exit:
	dma_buf_put(dma_buf);
	return match;
}

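/*
 * Map a buffer fd into the SMMU, or reuse an existing mapping if the fd is
 * already registered. Returns the mapped device address (adjusted by offset)
 * in *vaddr and takes a reference on the registered buffer.
 */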
int qcedev_check_and_map_buffer(void *handle,
				int fd, unsigned int offset,
				unsigned int fd_size,
				unsigned long long *vaddr)
{
	bool found = false;
	struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL;
	struct qcedev_mem_client *mem_client = NULL;
	struct qcedev_handle *qce_hndl = handle;
	int rc = 0;
	unsigned long mapped_size = 0;

	if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}

	mem_client = qce_hndl->cntl->mem_client;
	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	/* Check if the buffer fd is already mapped */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
		found = compare_ion_buffers(mem_client, temp->ion_buf.hndl, fd);
		if (found) {
			*vaddr = temp->ion_buf.iova;
			mapped_size = temp->ion_buf.mapped_buf_size;
			atomic_inc(&temp->ref_count);
			break;
		}
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	/* If the buffer fd is not mapped yet, create a fresh mapping */
	if (!found) {
		pr_debug("%s: info: ion fd not registered with driver\n",
			 __func__);
		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
		if (!binfo) {
			pr_err("%s: err: failed to allocate binfo\n",
			       __func__);
			rc = -ENOMEM;
			goto error;
		}
		rc = qcedev_map_buffer(qce_hndl, mem_client, fd,
				       fd_size, binfo);
		if (rc) {
			pr_err("%s: err: failed to map fd (%d) error = %d\n",
			       __func__, fd, rc);
			goto error;
		}

		*vaddr = binfo->ion_buf.iova;
		mapped_size = binfo->ion_buf.mapped_buf_size;
		atomic_inc(&binfo->ref_count);

		/* Add buffer mapping information to regd buffer list */
		mutex_lock(&qce_hndl->registeredbufs.lock);
		list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
		mutex_unlock(&qce_hndl->registeredbufs.lock);
	}

	/* Make sure the offset is within the mapped range */
	if (offset >= mapped_size) {
		pr_err("%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
		       __func__, offset, mapped_size, fd);
		rc = -ERANGE;
		goto unmap;
	}

	/* Return the mapped virtual address adjusted by the offset */
	*vaddr += offset;
	return 0;

unmap:
	if (!found) {
		/* Unlink the fresh mapping again before unmapping and freeing it */
		mutex_lock(&qce_hndl->registeredbufs.lock);
		list_del(&binfo->list);
		mutex_unlock(&qce_hndl->registeredbufs.lock);
		qcedev_unmap_buffer(handle, mem_client, binfo);
	}
error:
	kfree(binfo);
	return rc;
}

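/*
 * Drop one reference on a registered buffer fd and unmap it once the last
 * reference is gone.
 */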
int qcedev_check_and_unmap_buffer(void *handle, int fd)
{
	struct qcedev_reg_buf_info *binfo = NULL, *dummy = NULL;
	struct qcedev_mem_client *mem_client = NULL;
	struct qcedev_handle *qce_hndl = handle;
	bool found = false;

	if (!handle || fd < 0) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}

	mem_client = qce_hndl->cntl->mem_client;
	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	/* Check if the buffer fd is mapped and present in the regd list. */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_for_each_entry_safe(binfo, dummy,
				 &qce_hndl->registeredbufs.list, list) {
		found = compare_ion_buffers(mem_client,
					    binfo->ion_buf.hndl, fd);
		if (found) {
			atomic_dec(&binfo->ref_count);

			/* Unmap only if there are no more references */
			if (atomic_read(&binfo->ref_count) == 0) {
				qcedev_unmap_buffer(qce_hndl,
						    mem_client, binfo);
				list_del(&binfo->list);
				kfree(binfo);
			}
			break;
		}
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	if (!found) {
		pr_err("%s: err: calling unmap on unknown fd %d\n",
		       __func__, fd);
		return -EINVAL;
	}
	return 0;
}