/* ota_crypto.c */
  1. /* Copyright (c) 2010-2014,2017 The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. /* QTI Over the Air (OTA) Crypto driver */
  14. #include <linux/types.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/kernel.h>
  18. #include <linux/dmapool.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/init.h>
  22. #include <linux/module.h>
  23. #include <linux/fs.h>
  24. #include <linux/miscdevice.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/debugfs.h>
  27. #include <linux/cache.h>
  28. #include <linux/qcota.h>
  29. #include "qce.h"
  30. #include "qce_ota.h"
/* Operation type carried by one OTA request. */
enum qce_ota_oper_enum {
	QCE_OTA_F8_OPER = 0,		/* single-packet F8 cipher */
	QCE_OTA_MPKT_F8_OPER = 1,	/* fixed-size multi-packet F8 */
	QCE_OTA_F9_OPER = 2,		/* F9 integrity (MAC) */
	QCE_OTA_VAR_MPKT_F8_OPER = 3,	/* variable-size multi-packet F8 */
	QCE_OTA_OPER_LAST
};

struct ota_dev_control;

/*
 * One queued or in-flight request. Allocated on the ioctl caller's
 * stack; "complete" is signalled from req_done() when the engine
 * finishes (or dispatch fails).
 */
struct ota_async_req {
	struct list_head rlist;		/* link in podev->ready_commands */
	struct completion complete;	/* wakes the submitting thread */
	int err;			/* final status: 0 or -errno */
	enum qce_ota_oper_enum op;
	union {
		struct qce_f9_req f9_req;
		struct qce_f8_req f8_req;
		struct qce_f8_multi_pkt_req f8_mp_req;
		struct qce_f8_varible_multi_pkt_req f8_v_mp_req;
	} req;
	/* current packet index for QCE_OTA_VAR_MPKT_F8_OPER sub-requests */
	unsigned int steps;
	struct ota_qce_dev *pqce;	/* engine servicing this request */
};
/*
 * Register ourselves as a misc device to be able to access the ota
 * from userspace.
 */
#define QCOTA_DEV	"qcota"

/* Driver-global state shared by all probed OTA crypto engines. */
struct ota_dev_control {

	/* misc device */
	struct miscdevice miscdevice;
	struct list_head ready_commands; /* requests waiting for an engine */
	unsigned int magic;		 /* OTA_MAGIC; handle sanity check */
	struct list_head qce_dev;	 /* list of struct ota_qce_dev */
	spinlock_t lock;		 /* guards ready_commands + qce_dev */
	struct mutex register_lock;	 /* serializes misc_register state */
	bool registered;		 /* misc device registered once */
	uint32_t total_units;		 /* number of engines probed */
};

/* Per-engine (per platform_device) state. */
struct ota_qce_dev {
	struct list_head qlist;		/* link in podev->qce_dev */
	/* qce handle */
	void *qce;
	/* platform device */
	struct platform_device *pdev;
	struct ota_async_req *active_command; /* request currently running */
	struct tasklet_struct done_tasklet;   /* runs req_done() */
	struct ota_dev_control *podev;
	uint32_t unit;			/* engine index, used in stats */
	u64 total_req;			/* requests dispatched here */
	u64 err_req;			/* requests that failed */
};
/* magic stored in ota_dev_control to validate userspace handles */
#define OTA_MAGIC 0x4f544143

static long qcota_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg);
static int qcota_open(struct inode *inode, struct file *file);
static int qcota_release(struct inode *inode, struct file *file);
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);

static const struct file_operations qcota_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcota_ioctl,
	.open = qcota_open,
	.release = qcota_release,
};

/* Single driver-wide control instance; engines attach at probe time. */
static struct ota_dev_control qcota_dev = {
	.miscdevice = {
		.minor = MISC_DYNAMIC_MINOR,
		.name = "qcota0",
		.fops = &qcota_fops,
	},
	.magic = OTA_MAGIC,
};
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

/* Cumulative request/result counters, exposed through debugfs. */
struct qcota_stat {
	u64 f8_req;
	u64 f8_mp_req;
	u64 f8_v_mp_req;
	u64 f9_req;
	u64 f8_op_success;
	u64 f8_op_fail;
	u64 f8_mp_op_success;
	u64 f8_mp_op_fail;
	u64 f8_v_mp_op_success;
	u64 f8_v_mp_op_fail;
	u64 f9_op_success;
	u64 f9_op_fail;
};

static struct qcota_stat _qcota_stat;
static struct dentry *_debug_dent;	/* debugfs "qcota" directory */
static char _debug_read_buf[DEBUG_MAX_RW_BUF]; /* stats render buffer */
static int _debug_qcota;		/* i_private cookie for stats file */
  123. static struct ota_dev_control *qcota_control(void)
  124. {
  125. return &qcota_dev;
  126. }
  127. static int qcota_open(struct inode *inode, struct file *file)
  128. {
  129. struct ota_dev_control *podev;
  130. podev = qcota_control();
  131. if (podev == NULL) {
  132. pr_err("%s: no such device %d\n", __func__,
  133. MINOR(inode->i_rdev));
  134. return -ENOENT;
  135. }
  136. file->private_data = podev;
  137. return 0;
  138. }
  139. static int qcota_release(struct inode *inode, struct file *file)
  140. {
  141. struct ota_dev_control *podev;
  142. podev = file->private_data;
  143. if (podev != NULL && podev->magic != OTA_MAGIC) {
  144. pr_err("%s: invalid handle %pK\n",
  145. __func__, podev);
  146. }
  147. file->private_data = NULL;
  148. return 0;
  149. }
/*
 * Advance a variable multi-packet F8 request to its next packet.
 * Packets are packed back-to-back, cache-line aligned, in the single
 * kernel bounce buffer set up by qcota_ioctl(); each sub-request bumps
 * COUNT-C. Returns true when the embedded qce_f8_req was re-aimed at
 * the next packet, false when all packets are done or a prior step
 * failed. Called from req_done() with podev->lock held.
 */
static bool _next_v_mp_req(struct ota_async_req *areq)
{
	unsigned char *p;

	/* a failed step aborts the whole variable multi-packet request */
	if (areq->err)
		return false;
	if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
		return false;

	/* next packet starts at the aligned end of the previous one */
	p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
	p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
	p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);

	/* in-place cipher: input and output share the buffer */
	areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
	areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
	areq->req.f8_v_mp_req.qce_f8_req.data_len =
		areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;

	areq->req.f8_v_mp_req.qce_f8_req.count_c++;
	return true;
}
/*
 * Tasklet handler: retire this engine's active command and start the
 * next piece of work. For variable multi-packet F8 the request is
 * re-dispatched per packet before it is considered finished. "data"
 * is the struct ota_qce_dev whose callback scheduled the tasklet.
 */
static void req_done(unsigned long data)
{
	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
	struct ota_dev_control *podev = pqce->podev;
	struct ota_async_req *areq;
	unsigned long flags;
	struct ota_async_req *new_req = NULL;
	int ret = 0;
	bool schedule = true;

	spin_lock_irqsave(&podev->lock, flags);
	areq = pqce->active_command;
	if (unlikely(areq == NULL))
		pr_err("ota_crypto: req_done, no active request\n");
	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
		if (_next_v_mp_req(areq)) {
			/* execute next subcommand */
			spin_unlock_irqrestore(&podev->lock, flags);
			ret = start_req(pqce, areq);
			if (unlikely(ret)) {
				/* dispatch failed: complete areq below and
				 * let the loop pick a new command */
				areq->err = ret;
				schedule = true;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				/* engine busy again; nothing finished yet */
				areq = NULL;
				schedule = false;
			}
		} else {
			/* done with this variable mp req */
			schedule = true;
		}
	}
	/* hand the engine the next ready command, skipping any that
	 * fail to start (those are completed immediately) */
	while (schedule) {
		if (!list_empty(&podev->ready_commands)) {
			new_req = container_of(podev->ready_commands.next,
						struct ota_async_req, rlist);
			list_del(&new_req->rlist);
			pqce->active_command = new_req;
			spin_unlock_irqrestore(&podev->lock, flags);

			new_req->err = 0;
			/* start a new request */
			ret = start_req(pqce, new_req);
			if (unlikely(new_req && ret)) {
				new_req->err = ret;
				complete(&new_req->complete);
				ret = 0;
				new_req = NULL;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				schedule = false;
			}
		} else {
			/* nothing queued; engine goes idle */
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
			schedule = false;
		};
	}
	/* wake the submitter of the request that just finished */
	if (areq)
		complete(&areq->complete);
}
  226. static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
  227. int ret)
  228. {
  229. struct ota_async_req *areq = (struct ota_async_req *) cookie;
  230. struct ota_qce_dev *pqce;
  231. pqce = areq->pqce;
  232. areq->req.f9_req.mac_i = *((uint32_t *)icv);
  233. if (ret) {
  234. pqce->err_req++;
  235. areq->err = -ENXIO;
  236. } else
  237. areq->err = 0;
  238. tasklet_schedule(&pqce->done_tasklet);
  239. }
  240. static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
  241. int ret)
  242. {
  243. struct ota_async_req *areq = (struct ota_async_req *) cookie;
  244. struct ota_qce_dev *pqce;
  245. pqce = areq->pqce;
  246. if (ret) {
  247. pqce->err_req++;
  248. areq->err = -ENXIO;
  249. } else {
  250. areq->err = 0;
  251. }
  252. tasklet_schedule(&pqce->done_tasklet);
  253. }
  254. static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
  255. {
  256. struct qce_f9_req *pf9;
  257. struct qce_f8_multi_pkt_req *p_mp_f8;
  258. struct qce_f8_req *pf8;
  259. int ret = 0;
  260. /* command should be on the podev->active_command */
  261. areq->pqce = pqce;
  262. switch (areq->op) {
  263. case QCE_OTA_F8_OPER:
  264. pf8 = &areq->req.f8_req;
  265. ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
  266. break;
  267. case QCE_OTA_MPKT_F8_OPER:
  268. p_mp_f8 = &areq->req.f8_mp_req;
  269. ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
  270. break;
  271. case QCE_OTA_F9_OPER:
  272. pf9 = &areq->req.f9_req;
  273. ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
  274. break;
  275. case QCE_OTA_VAR_MPKT_F8_OPER:
  276. pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
  277. ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
  278. break;
  279. default:
  280. ret = -ENOTSUPP;
  281. break;
  282. };
  283. areq->err = ret;
  284. pqce->total_req++;
  285. if (ret)
  286. pqce->err_req++;
  287. return ret;
  288. }
  289. static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
  290. {
  291. /* do this function with spinlock set */
  292. struct ota_qce_dev *p;
  293. if (unlikely(list_empty(&podev->qce_dev))) {
  294. pr_err("%s: no valid qce to schedule\n", __func__);
  295. return NULL;
  296. }
  297. list_for_each_entry(p, &podev->qce_dev, qlist) {
  298. if (p->active_command == NULL)
  299. return p;
  300. }
  301. return NULL;
  302. }
/*
 * Run one request to completion: hand it to an idle engine (or queue
 * it on ready_commands for req_done() to pick up), block until the
 * completion fires, then account per-op statistics. Returns the
 * request's final error status.
 */
static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
{
	unsigned long flags;
	int ret = 0;
	struct qcota_stat *pstat;
	struct ota_qce_dev *pqce;

	areq->err = 0;
	spin_lock_irqsave(&podev->lock, flags);
	pqce = schedule_qce(podev);
	if (pqce) {
		pqce->active_command = areq;
		spin_unlock_irqrestore(&podev->lock, flags);

		ret = start_req(pqce, areq);
		if (ret != 0) {
			/* dispatch failed; release the engine */
			spin_lock_irqsave(&podev->lock, flags);
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
		}

	} else {
		/* all engines busy; req_done() will dequeue this later */
		list_add_tail(&areq->rlist, &podev->ready_commands);
		spin_unlock_irqrestore(&podev->lock, flags);
	}

	/* on dispatch failure the callback never runs, so don't wait */
	if (ret == 0)
		wait_for_completion(&areq->complete);

	pstat = &_qcota_stat;
	switch (areq->op) {
	case QCE_OTA_F8_OPER:
		if (areq->err)
			pstat->f8_op_fail++;
		else
			pstat->f8_op_success++;
		break;

	case QCE_OTA_MPKT_F8_OPER:
		if (areq->err)
			pstat->f8_mp_op_fail++;
		else
			pstat->f8_mp_op_success++;
		break;

	case QCE_OTA_F9_OPER:
		if (areq->err)
			pstat->f9_op_fail++;
		else
			pstat->f9_op_success++;
		break;

	case QCE_OTA_VAR_MPKT_F8_OPER:
	default:
		if (areq->err)
			pstat->f8_v_mp_op_fail++;
		else
			pstat->f8_v_mp_op_success++;
		break;
	};

	return areq->err;
}
  357. static long qcota_ioctl(struct file *file,
  358. unsigned int cmd, unsigned long arg)
  359. {
  360. int err = 0;
  361. struct ota_dev_control *podev;
  362. uint8_t *user_src;
  363. uint8_t *user_dst;
  364. uint8_t *k_buf = NULL;
  365. struct ota_async_req areq;
  366. uint32_t total, temp;
  367. struct qcota_stat *pstat;
  368. int i;
  369. uint8_t *p = NULL;
  370. podev = file->private_data;
  371. if (podev == NULL || podev->magic != OTA_MAGIC) {
  372. pr_err("%s: invalid handle %pK\n",
  373. __func__, podev);
  374. return -ENOENT;
  375. }
  376. /* Verify user arguments. */
  377. if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
  378. return -ENOTTY;
  379. init_completion(&areq.complete);
  380. pstat = &_qcota_stat;
  381. switch (cmd) {
  382. case QCOTA_F9_REQ:
  383. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  384. sizeof(struct qce_f9_req)))
  385. return -EFAULT;
  386. if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
  387. sizeof(struct qce_f9_req)))
  388. return -EFAULT;
  389. user_src = areq.req.f9_req.message;
  390. if (!access_ok(VERIFY_READ, (void __user *)user_src,
  391. areq.req.f9_req.msize))
  392. return -EFAULT;
  393. if (areq.req.f9_req.msize == 0)
  394. return 0;
  395. k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
  396. if (k_buf == NULL)
  397. return -ENOMEM;
  398. if (__copy_from_user(k_buf, (void __user *)user_src,
  399. areq.req.f9_req.msize)) {
  400. kfree(k_buf);
  401. return -EFAULT;
  402. }
  403. areq.req.f9_req.message = k_buf;
  404. areq.op = QCE_OTA_F9_OPER;
  405. pstat->f9_req++;
  406. err = submit_req(&areq, podev);
  407. areq.req.f9_req.message = user_src;
  408. if (err == 0 && __copy_to_user((void __user *)arg,
  409. &areq.req.f9_req, sizeof(struct qce_f9_req))) {
  410. err = -EFAULT;
  411. }
  412. kfree(k_buf);
  413. break;
  414. case QCOTA_F8_REQ:
  415. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  416. sizeof(struct qce_f8_req)))
  417. return -EFAULT;
  418. if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
  419. sizeof(struct qce_f8_req)))
  420. return -EFAULT;
  421. total = areq.req.f8_req.data_len;
  422. user_src = areq.req.f8_req.data_in;
  423. if (user_src != NULL) {
  424. if (!access_ok(VERIFY_READ, (void __user *)
  425. user_src, total))
  426. return -EFAULT;
  427. };
  428. user_dst = areq.req.f8_req.data_out;
  429. if (!access_ok(VERIFY_WRITE, (void __user *)
  430. user_dst, total))
  431. return -EFAULT;
  432. if (!total)
  433. return 0;
  434. k_buf = kmalloc(total, GFP_KERNEL);
  435. if (k_buf == NULL)
  436. return -ENOMEM;
  437. /* k_buf returned from kmalloc should be cache line aligned */
  438. if (user_src && __copy_from_user(k_buf,
  439. (void __user *)user_src, total)) {
  440. kfree(k_buf);
  441. return -EFAULT;
  442. }
  443. if (user_src)
  444. areq.req.f8_req.data_in = k_buf;
  445. else
  446. areq.req.f8_req.data_in = NULL;
  447. areq.req.f8_req.data_out = k_buf;
  448. areq.op = QCE_OTA_F8_OPER;
  449. pstat->f8_req++;
  450. err = submit_req(&areq, podev);
  451. if (err == 0 && __copy_to_user(user_dst, k_buf, total))
  452. err = -EFAULT;
  453. kfree(k_buf);
  454. break;
  455. case QCOTA_F8_MPKT_REQ:
  456. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  457. sizeof(struct qce_f8_multi_pkt_req)))
  458. return -EFAULT;
  459. if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
  460. sizeof(struct qce_f8_multi_pkt_req)))
  461. return -EFAULT;
  462. temp = areq.req.f8_mp_req.qce_f8_req.data_len;
  463. if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
  464. areq.req.f8_mp_req.cipher_size)
  465. return -EINVAL;
  466. total = (uint32_t) areq.req.f8_mp_req.num_pkt *
  467. areq.req.f8_mp_req.qce_f8_req.data_len;
  468. user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
  469. if (!access_ok(VERIFY_READ, (void __user *)
  470. user_src, total))
  471. return -EFAULT;
  472. user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
  473. if (!access_ok(VERIFY_WRITE, (void __user *)
  474. user_dst, total))
  475. return -EFAULT;
  476. if (!total)
  477. return 0;
  478. k_buf = kmalloc(total, GFP_KERNEL);
  479. if (k_buf == NULL)
  480. return -ENOMEM;
  481. /* k_buf returned from kmalloc should be cache line aligned */
  482. if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
  483. kfree(k_buf);
  484. return -EFAULT;
  485. }
  486. areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
  487. areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
  488. areq.op = QCE_OTA_MPKT_F8_OPER;
  489. pstat->f8_mp_req++;
  490. err = submit_req(&areq, podev);
  491. if (err == 0 && __copy_to_user(user_dst, k_buf, total))
  492. err = -EFAULT;
  493. kfree(k_buf);
  494. break;
  495. case QCOTA_F8_V_MPKT_REQ:
  496. if (!access_ok(VERIFY_WRITE, (void __user *)arg,
  497. sizeof(struct qce_f8_varible_multi_pkt_req)))
  498. return -EFAULT;
  499. if (__copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
  500. sizeof(struct qce_f8_varible_multi_pkt_req)))
  501. return -EFAULT;
  502. if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
  503. return -EINVAL;
  504. for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  505. if (!access_ok(VERIFY_WRITE, (void __user *)
  506. areq.req.f8_v_mp_req.cipher_iov[i].addr,
  507. areq.req.f8_v_mp_req.cipher_iov[i].size))
  508. return -EFAULT;
  509. total += areq.req.f8_v_mp_req.cipher_iov[i].size;
  510. total = ALIGN(total, L1_CACHE_BYTES);
  511. }
  512. if (!total)
  513. return 0;
  514. k_buf = kmalloc(total, GFP_KERNEL);
  515. if (k_buf == NULL)
  516. return -ENOMEM;
  517. for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  518. user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
  519. if (__copy_from_user(p, (void __user *)user_src,
  520. areq.req.f8_v_mp_req.cipher_iov[i].size)) {
  521. kfree(k_buf);
  522. return -EFAULT;
  523. }
  524. p += areq.req.f8_v_mp_req.cipher_iov[i].size;
  525. p = (uint8_t *) ALIGN(((uintptr_t)p),
  526. L1_CACHE_BYTES);
  527. }
  528. areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
  529. areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
  530. areq.req.f8_v_mp_req.qce_f8_req.data_len =
  531. areq.req.f8_v_mp_req.cipher_iov[0].size;
  532. areq.steps = 0;
  533. areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
  534. pstat->f8_v_mp_req++;
  535. err = submit_req(&areq, podev);
  536. if (err != 0) {
  537. kfree(k_buf);
  538. return err;
  539. }
  540. for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
  541. user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
  542. if (__copy_to_user(user_dst, p,
  543. areq.req.f8_v_mp_req.cipher_iov[i].size)) {
  544. kfree(k_buf);
  545. return -EFAULT;
  546. }
  547. p += areq.req.f8_v_mp_req.cipher_iov[i].size;
  548. p = (uint8_t *) ALIGN(((uintptr_t)p),
  549. L1_CACHE_BYTES);
  550. }
  551. kfree(k_buf);
  552. break;
  553. default:
  554. return -ENOTTY;
  555. }
  556. return err;
  557. }
  558. static int qcota_probe(struct platform_device *pdev)
  559. {
  560. void *handle = NULL;
  561. int rc = 0;
  562. struct ota_dev_control *podev;
  563. struct ce_hw_support ce_support;
  564. struct ota_qce_dev *pqce;
  565. unsigned long flags;
  566. podev = &qcota_dev;
  567. pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
  568. if (!pqce) {
  569. pr_err("qcota_probe: Memory allocation FAIL\n");
  570. return -ENOMEM;
  571. }
  572. pqce->podev = podev;
  573. pqce->active_command = NULL;
  574. tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
  575. /* open qce */
  576. handle = qce_open(pdev, &rc);
  577. if (handle == NULL) {
  578. pr_err("%s: device %s, can not open qce\n",
  579. __func__, pdev->name);
  580. goto err;
  581. }
  582. if (qce_hw_support(handle, &ce_support) < 0 ||
  583. ce_support.ota == false) {
  584. pr_err("%s: device %s, qce does not support ota capability\n",
  585. __func__, pdev->name);
  586. rc = -ENODEV;
  587. goto err;
  588. }
  589. pqce->qce = handle;
  590. pqce->pdev = pdev;
  591. pqce->total_req = 0;
  592. pqce->err_req = 0;
  593. platform_set_drvdata(pdev, pqce);
  594. mutex_lock(&podev->register_lock);
  595. rc = 0;
  596. if (podev->registered == false) {
  597. rc = misc_register(&podev->miscdevice);
  598. if (rc == 0) {
  599. pqce->unit = podev->total_units;
  600. podev->total_units++;
  601. podev->registered = true;
  602. };
  603. } else {
  604. pqce->unit = podev->total_units;
  605. podev->total_units++;
  606. }
  607. mutex_unlock(&podev->register_lock);
  608. if (rc) {
  609. pr_err("ion: failed to register misc device.\n");
  610. goto err;
  611. }
  612. spin_lock_irqsave(&podev->lock, flags);
  613. list_add_tail(&pqce->qlist, &podev->qce_dev);
  614. spin_unlock_irqrestore(&podev->lock, flags);
  615. return 0;
  616. err:
  617. if (handle)
  618. qce_close(handle);
  619. platform_set_drvdata(pdev, NULL);
  620. tasklet_kill(&pqce->done_tasklet);
  621. kfree(pqce);
  622. return rc;
  623. }
/*
 * Remove one engine: close its QCE handle, unlink it from the
 * scheduler, and deregister the shared misc device when the last
 * engine goes away.
 */
static int qcota_remove(struct platform_device *pdev)
{
	struct ota_dev_control *podev;
	struct ota_qce_dev *pqce;
	unsigned long flags;

	pqce = platform_get_drvdata(pdev);
	if (!pqce)
		return 0;
	if (pqce->qce)
		qce_close(pqce->qce);

	podev = pqce->podev;
	if (!podev)
		goto ret;

	spin_lock_irqsave(&podev->lock, flags);
	list_del(&pqce->qlist);
	spin_unlock_irqrestore(&podev->lock, flags);

	mutex_lock(&podev->register_lock);
	if (--podev->total_units == 0) {
		/* a minor still equal to MISC_DYNAMIC_MINOR is taken to
		 * mean the misc device was never registered — presumably
		 * misc_register() rewrites .minor on success (verify) */
		if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
			misc_deregister(&podev->miscdevice);
		podev->registered = false;
	}
	mutex_unlock(&podev->register_lock);
ret:

	tasklet_kill(&pqce->done_tasklet);
	kfree(pqce);
	return 0;
}
/* Devicetree match table for this driver. */
static const struct of_device_id qcota_match[] = {
	{	.compatible = "qcom,qcota",
	},
	{}
};

static struct platform_driver qcota_plat_driver = {
	.probe = qcota_probe,
	.remove = qcota_remove,
	.driver = {
		.name = "qcota",
		.owner = THIS_MODULE,
		.of_match_table = qcota_match,
	},
};
/*
 * Render the global and per-engine counters into _debug_read_buf.
 * Returns the number of bytes written. Used by the debugfs read
 * handler; the per-engine walk takes podev->lock.
 */
static int _disp_stats(void)
{
	struct qcota_stat *pstat;
	int len = 0;
	struct ota_dev_control *podev = &qcota_dev;
	unsigned long flags;
	struct ota_qce_dev *p;

	pstat = &_qcota_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI OTA crypto accelerator Statistics:\n");

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 request             : %llu\n",
					pstat->f8_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 operation success   : %llu\n",
					pstat->f8_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 operation fail      : %llu\n",
					pstat->f8_op_fail);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP request          : %llu\n",
					pstat->f8_mp_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP operation success: %llu\n",
					pstat->f8_mp_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP operation fail   : %llu\n",
					pstat->f8_mp_op_fail);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP request : %llu\n",
					pstat->f8_v_mp_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP operation success: %llu\n",
					pstat->f8_v_mp_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP operation fail   : %llu\n",
					pstat->f8_v_mp_op_fail);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 request             : %llu\n",
					pstat->f9_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 operation success   : %llu\n",
					pstat->f9_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 operation fail      : %llu\n",
					pstat->f9_op_fail);

	/* per-engine request counters */
	spin_lock_irqsave(&podev->lock, flags);

	list_for_each_entry(p, &podev->qce_dev, qlist) {
		len += scnprintf(
			_debug_read_buf + len,
			DEBUG_MAX_RW_BUF - len - 1,
			"   Engine %4d Req         : %llu\n",
			p->unit,
			p->total_req
		);
		len += scnprintf(
			_debug_read_buf + len,
			DEBUG_MAX_RW_BUF - len - 1,
			"   Engine %4d Req Error   : %llu\n",
			p->unit,
			p->err_req
		);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	return len;
}
  732. static int _debug_stats_open(struct inode *inode, struct file *file)
  733. {
  734. file->private_data = inode->i_private;
  735. return 0;
  736. }
  737. static ssize_t _debug_stats_read(struct file *file, char __user *buf,
  738. size_t count, loff_t *ppos)
  739. {
  740. int rc = -EINVAL;
  741. int len;
  742. len = _disp_stats();
  743. if (len <= count)
  744. rc = simple_read_from_buffer((void __user *) buf, len,
  745. ppos, (void *) _debug_read_buf, len);
  746. return rc;
  747. }
  748. static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
  749. size_t count, loff_t *ppos)
  750. {
  751. struct ota_dev_control *podev = &qcota_dev;
  752. unsigned long flags;
  753. struct ota_qce_dev *p;
  754. memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
  755. spin_lock_irqsave(&podev->lock, flags);
  756. list_for_each_entry(p, &podev->qce_dev, qlist) {
  757. p->total_req = 0;
  758. p->err_req = 0;
  759. }
  760. spin_unlock_irqrestore(&podev->lock, flags);
  761. return count;
  762. }
/* debugfs stats file: read dumps counters, any write resets them. */
static const struct file_operations _debug_stats_ops = {
	.open = _debug_stats_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
  768. static int _qcota_debug_init(void)
  769. {
  770. int rc;
  771. char name[DEBUG_MAX_FNAME];
  772. struct dentry *dent;
  773. _debug_dent = debugfs_create_dir("qcota", NULL);
  774. if (IS_ERR(_debug_dent)) {
  775. pr_err("qcota debugfs_create_dir fail, error %ld\n",
  776. PTR_ERR(_debug_dent));
  777. return PTR_ERR(_debug_dent);
  778. }
  779. snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
  780. _debug_qcota = 0;
  781. dent = debugfs_create_file(name, 0644, _debug_dent,
  782. &_debug_qcota, &_debug_stats_ops);
  783. if (dent == NULL) {
  784. pr_err("qcota debugfs_create_file fail, error %ld\n",
  785. PTR_ERR(dent));
  786. rc = PTR_ERR(dent);
  787. goto err;
  788. }
  789. return 0;
  790. err:
  791. debugfs_remove_recursive(_debug_dent);
  792. return rc;
  793. }
  794. static int __init qcota_init(void)
  795. {
  796. int rc;
  797. struct ota_dev_control *podev;
  798. rc = _qcota_debug_init();
  799. if (rc)
  800. return rc;
  801. podev = &qcota_dev;
  802. INIT_LIST_HEAD(&podev->ready_commands);
  803. INIT_LIST_HEAD(&podev->qce_dev);
  804. spin_lock_init(&podev->lock);
  805. mutex_init(&podev->register_lock);
  806. podev->registered = false;
  807. podev->total_units = 0;
  808. return platform_driver_register(&qcota_plat_driver);
  809. }
/* Module exit: remove debugfs entries, then unregister the driver
 * (engines are torn down via qcota_remove()). */
static void __exit qcota_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcota_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Ota Crypto driver");

module_init(qcota_init);
module_exit(qcota_exit);