diag_memorydevice.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465
  1. /* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/slab.h>
  13. #include <linux/init.h>
  14. #include <linux/module.h>
  15. #include <linux/device.h>
  16. #include <linux/err.h>
  17. #include <linux/sched.h>
  18. #include <linux/ratelimit.h>
  19. #include <linux/workqueue.h>
  20. #include <linux/diagchar.h>
  21. #include <linux/delay.h>
  22. #include <linux/kmemleak.h>
  23. #include <linux/uaccess.h>
  24. #include "diagchar.h"
  25. #include "diag_memorydevice.h"
  26. #include "diagfwd_bridge.h"
  27. #include "diag_mux.h"
  28. #include "diagmem.h"
  29. #include "diagfwd.h"
  30. #include "diagfwd_peripheral.h"
  31. #include "diag_ipc_logging.h"
/*
 * Per-device memory-device channel table, one entry per logical diag
 * device. The bridged (MDM/MDM2/SMUX) entries exist only when
 * CONFIG_DIAGFWD_BRIDGE_CODE is enabled. .tbl/.num_tbl_entries are
 * populated at runtime by diag_md_init()/diag_md_mdm_init(); .ops and
 * .ctx are filled in by diag_md_register().
 */
struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
	{
		/* Local (APPS) processor channel */
		.id = DIAG_MD_LOCAL,
		.ctx = 0,
		.mempool = POOL_TYPE_MUX_APPS,
		.num_tbl_entries = 0,
		.tbl = NULL,
		.ops = NULL,
	},
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	{
		/* First remote (MDM) processor channel */
		.id = DIAG_MD_MDM,
		.ctx = 0,
		.mempool = POOL_TYPE_MDM_MUX,
		.num_tbl_entries = 0,
		.tbl = NULL,
		.ops = NULL,
	},
	{
		/* Second remote (MDM2) processor channel */
		.id = DIAG_MD_MDM2,
		.ctx = 0,
		.mempool = POOL_TYPE_MDM2_MUX,
		.num_tbl_entries = 0,
		.tbl = NULL,
		.ops = NULL,
	},
	{
		/* QSC device connected over SMUX */
		.id = DIAG_MD_SMUX,
		.ctx = 0,
		.mempool = POOL_TYPE_QSC_MUX,
		.num_tbl_entries = 0,
		.tbl = NULL,
		.ops = NULL,
	}
#endif
};
  68. int diag_md_register(int id, int ctx, struct diag_mux_ops *ops)
  69. {
  70. if (id < 0 || id >= NUM_DIAG_MD_DEV || !ops)
  71. return -EINVAL;
  72. diag_md[id].ops = ops;
  73. diag_md[id].ctx = ctx;
  74. return 0;
  75. }
  76. void diag_md_open_all(void)
  77. {
  78. int i;
  79. struct diag_md_info *ch = NULL;
  80. for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
  81. ch = &diag_md[i];
  82. if (ch->ops && ch->ops->open)
  83. ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
  84. }
  85. }
  86. void diag_md_close_all(void)
  87. {
  88. int i, j;
  89. unsigned long flags;
  90. struct diag_md_info *ch = NULL;
  91. struct diag_buf_tbl_t *entry = NULL;
  92. for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
  93. ch = &diag_md[i];
  94. if (ch->ops && ch->ops->close)
  95. ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
  96. /*
  97. * When we close the Memory device mode, make sure we flush the
  98. * internal buffers in the table so that there are no stale
  99. * entries.
  100. */
  101. spin_lock_irqsave(&ch->lock, flags);
  102. for (j = 0; j < ch->num_tbl_entries; j++) {
  103. entry = &ch->tbl[j];
  104. if (entry->len <= 0)
  105. continue;
  106. if (ch->ops && ch->ops->write_done)
  107. ch->ops->write_done(entry->buf, entry->len,
  108. entry->ctx,
  109. DIAG_MEMORY_DEVICE_MODE);
  110. entry->buf = NULL;
  111. entry->len = 0;
  112. entry->ctx = 0;
  113. }
  114. spin_unlock_irqrestore(&ch->lock, flags);
  115. }
  116. diag_ws_reset(DIAG_WS_MUX);
  117. }
  118. int diag_md_write(int id, unsigned char *buf, int len, int ctx)
  119. {
  120. int i, peripheral, pid = 0;
  121. uint8_t found = 0;
  122. unsigned long flags;
  123. struct diag_md_info *ch = NULL;
  124. struct diag_md_session_t *session_info = NULL;
  125. if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
  126. return -EINVAL;
  127. if (!buf || len < 0)
  128. return -EINVAL;
  129. peripheral = diag_md_get_peripheral(ctx);
  130. if (peripheral < 0)
  131. return -EINVAL;
  132. mutex_lock(&driver->md_session_lock);
  133. session_info = diag_md_session_get_peripheral(peripheral);
  134. if (!session_info) {
  135. mutex_unlock(&driver->md_session_lock);
  136. return -EIO;
  137. }
  138. pid = session_info->pid;
  139. mutex_unlock(&driver->md_session_lock);
  140. ch = &diag_md[id];
  141. if (!ch)
  142. return -EINVAL;
  143. spin_lock_irqsave(&ch->lock, flags);
  144. for (i = 0; i < ch->num_tbl_entries && !found; i++) {
  145. if (ch->tbl[i].buf != buf)
  146. continue;
  147. found = 1;
  148. pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, len: %d, back to the table for p: %d, t: %d, buf_num: %d, proc: %d, i: %d\n",
  149. buf, ch->tbl[i].len, GET_BUF_PERIPHERAL(ctx),
  150. GET_BUF_TYPE(ctx), GET_BUF_NUM(ctx), id, i);
  151. ch->tbl[i].buf = NULL;
  152. ch->tbl[i].len = 0;
  153. ch->tbl[i].ctx = 0;
  154. }
  155. spin_unlock_irqrestore(&ch->lock, flags);
  156. if (found)
  157. return -ENOMEM;
  158. spin_lock_irqsave(&ch->lock, flags);
  159. for (i = 0; i < ch->num_tbl_entries && !found; i++) {
  160. if (ch->tbl[i].len == 0) {
  161. ch->tbl[i].buf = buf;
  162. ch->tbl[i].len = len;
  163. ch->tbl[i].ctx = ctx;
  164. found = 1;
  165. diag_ws_on_read(DIAG_WS_MUX, len);
  166. }
  167. }
  168. spin_unlock_irqrestore(&ch->lock, flags);
  169. if (!found) {
  170. pr_err_ratelimited("diag: Unable to find an empty space in table, please reduce logging rate, proc: %d\n",
  171. id);
  172. return -ENOMEM;
  173. }
  174. found = 0;
  175. mutex_lock(&driver->diagchar_mutex);
  176. for (i = 0; i < driver->num_clients && !found; i++) {
  177. if ((driver->client_map[i].pid != pid) ||
  178. (driver->client_map[i].pid == 0))
  179. continue;
  180. found = 1;
  181. if (!(driver->data_ready[i] & USER_SPACE_DATA_TYPE)) {
  182. driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
  183. atomic_inc(&driver->data_ready_notif[i]);
  184. }
  185. pr_debug("diag: wake up logging process\n");
  186. wake_up_interruptible(&driver->wait_q);
  187. }
  188. mutex_unlock(&driver->diagchar_mutex);
  189. if (!found)
  190. return -EINVAL;
  191. return 0;
  192. }
/*
 * diag_md_copy_to_user - drain queued channel buffers to a client
 * @buf:      user-space destination buffer
 * @pret:     in/out offset into @buf; advanced as data is copied
 * @buf_size: total size of @buf
 * @info:     md session doing the read, or NULL
 *
 * Walks every channel table and copies each eligible entry to user
 * space as [remote token (bridged devices only)][length][payload].
 * Entries that cannot be delivered (no owning session, masked
 * peripheral, copy failure) are dropped. In both the success and the
 * drop case control reaches the drop_data label, which completes the
 * buffer via write_done and clears the table slot. A non-zero err
 * aborts both loops. Returns 0 or the last error (e.g. -ESRCH, or the
 * non-zero byte count from a failed copy_to_user).
 *
 * NOTE(review): find_get_pid() takes a reference on the struct pid
 * that is never dropped with put_pid(), and each get_pid_task() call
 * takes a task reference that is never released with
 * put_task_struct() — this looks like a reference leak per delivered
 * entry; confirm against later upstream fixes before relying on it.
 */
int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
		struct diag_md_session_t *info)
{
	int i, j;
	int err = 0;
	int ret = *pret;
	int num_data = 0;	/* entries successfully copied out */
	int remote_token;
	unsigned long flags;
	struct diag_md_info *ch = NULL;
	struct diag_buf_tbl_t *entry = NULL;
	uint8_t drain_again = 0;	/* set when @buf filled up early */
	int peripheral = 0;
	struct diag_md_session_t *session_info = NULL;
	struct pid *pid_struct = NULL;
	for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
		ch = &diag_md[i];
		for (j = 0; j < ch->num_tbl_entries && !err; j++) {
			entry = &ch->tbl[j];
			/* Skip empty slots. */
			if (entry->len <= 0 || entry->buf == NULL)
				continue;
			peripheral = diag_md_get_peripheral(entry->ctx);
			if (peripheral < 0)
				goto drop_data;
			session_info =
				diag_md_session_get_peripheral(peripheral);
			if (!session_info)
				goto drop_data;
			/* Leave entries owned by a different session queued. */
			if (session_info && info &&
				(session_info->pid != info->pid))
				continue;
			/* Drop data for peripherals masked out by the reader. */
			if ((info && (info->peripheral_mask &
				MD_PERIPHERAL_MASK(peripheral)) == 0))
				goto drop_data;
			pid_struct = find_get_pid(session_info->pid);
			if (!pid_struct) {
				err = -ESRCH;
				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
					"diag: No such md_session_map[%d] with pid = %d err=%d exists..\n",
					peripheral, session_info->pid, err);
				goto drop_data;
			}
			/*
			 * If the data is from remote processor, copy the remote
			 * token first
			 */
			/*
			 * Stop draining (and ask for another pass) when the
			 * entry plus its header(s) would overflow @buf.
			 * Bridged devices (i > 0) need one extra int for the
			 * remote token.
			 */
			if (i > 0) {
				if ((ret + (3 * sizeof(int)) + entry->len) >=
						buf_size) {
					drain_again = 1;
					break;
				}
			} else {
				if ((ret + (2 * sizeof(int)) + entry->len) >=
						buf_size) {
					drain_again = 1;
					break;
				}
			}
			if (i > 0) {
				remote_token = diag_get_remote(i);
				/* Copy only while the client task is alive. */
				if (get_pid_task(pid_struct, PIDTYPE_PID)) {
					err = copy_to_user(buf + ret,
							&remote_token,
							sizeof(int));
					if (err)
						goto drop_data;
					ret += sizeof(int);
				}
			}
			/* Copy the length of data being passed */
			if (get_pid_task(pid_struct, PIDTYPE_PID)) {
				err = copy_to_user(buf + ret,
						(void *)&(entry->len),
						sizeof(int));
				if (err)
					goto drop_data;
				ret += sizeof(int);
			}
			/* Copy the actual data being passed */
			if (get_pid_task(pid_struct, PIDTYPE_PID)) {
				err = copy_to_user(buf + ret,
						(void *)entry->buf,
						entry->len);
				if (err)
					goto drop_data;
				ret += entry->len;
			}
			/*
			 * The data is now copied to the user space client,
			 * Notify that the write is complete and delete its
			 * entry from the table
			 */
			num_data++;
drop_data:
			/* Reached on both success and drop: complete the
			 * buffer back to its owner and free the slot. */
			spin_lock_irqsave(&ch->lock, flags);
			if (ch->ops && ch->ops->write_done)
				ch->ops->write_done(entry->buf, entry->len,
						entry->ctx,
						DIAG_MEMORY_DEVICE_MODE);
			diag_ws_on_copy(DIAG_WS_MUX);
			entry->buf = NULL;
			entry->len = 0;
			entry->ctx = 0;
			spin_unlock_irqrestore(&ch->lock, flags);
		}
	}
	*pret = ret;
	/*
	 * Write the packet count into the header slot after the leading
	 * int of @buf; presumably the caller reserved that space — TODO
	 * confirm against the diagchar read path. pid_struct here is
	 * whatever the last loop iteration left behind.
	 */
	if (pid_struct && get_pid_task(pid_struct, PIDTYPE_PID)) {
		err = copy_to_user(buf + sizeof(int),
				(void *)&num_data,
				sizeof(int));
	}
	diag_ws_on_copy_complete(DIAG_WS_MUX);
	if (drain_again)
		chk_logging_wakeup();
	return err;
}
  311. int diag_md_close_peripheral(int id, uint8_t peripheral)
  312. {
  313. int i;
  314. uint8_t found = 0;
  315. unsigned long flags;
  316. struct diag_md_info *ch = NULL;
  317. struct diag_buf_tbl_t *entry = NULL;
  318. if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
  319. return -EINVAL;
  320. ch = &diag_md[id];
  321. spin_lock_irqsave(&ch->lock, flags);
  322. for (i = 0; i < ch->num_tbl_entries && !found; i++) {
  323. entry = &ch->tbl[i];
  324. if (peripheral > NUM_PERIPHERALS) {
  325. if (GET_PD_CTXT(entry->ctx) != peripheral)
  326. continue;
  327. } else {
  328. if (GET_BUF_PERIPHERAL(entry->ctx) !=
  329. peripheral)
  330. continue;
  331. }
  332. found = 1;
  333. if (ch->ops && ch->ops->write_done) {
  334. ch->ops->write_done(entry->buf, entry->len,
  335. entry->ctx,
  336. DIAG_MEMORY_DEVICE_MODE);
  337. entry->buf = NULL;
  338. entry->len = 0;
  339. entry->ctx = 0;
  340. }
  341. }
  342. spin_unlock_irqrestore(&ch->lock, flags);
  343. return 0;
  344. }
  345. int diag_md_init(void)
  346. {
  347. int i, j;
  348. struct diag_md_info *ch = NULL;
  349. for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
  350. ch = &diag_md[i];
  351. ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
  352. ch->tbl = kzalloc(ch->num_tbl_entries *
  353. sizeof(struct diag_buf_tbl_t),
  354. GFP_KERNEL);
  355. if (!ch->tbl)
  356. goto fail;
  357. for (j = 0; j < ch->num_tbl_entries; j++) {
  358. ch->tbl[j].buf = NULL;
  359. ch->tbl[j].len = 0;
  360. ch->tbl[j].ctx = 0;
  361. }
  362. spin_lock_init(&(ch->lock));
  363. }
  364. return 0;
  365. fail:
  366. diag_md_exit();
  367. return -ENOMEM;
  368. }
  369. int diag_md_mdm_init(void)
  370. {
  371. int i, j;
  372. struct diag_md_info *ch = NULL;
  373. for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
  374. ch = &diag_md[i];
  375. ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
  376. ch->tbl = kcalloc(ch->num_tbl_entries, sizeof(*ch->tbl),
  377. GFP_KERNEL);
  378. if (!ch->tbl)
  379. goto fail;
  380. for (j = 0; j < ch->num_tbl_entries; j++) {
  381. ch->tbl[j].buf = NULL;
  382. ch->tbl[j].len = 0;
  383. ch->tbl[j].ctx = 0;
  384. }
  385. spin_lock_init(&(ch->lock));
  386. }
  387. return 0;
  388. fail:
  389. diag_md_mdm_exit();
  390. return -ENOMEM;
  391. }
  392. void diag_md_exit(void)
  393. {
  394. int i;
  395. struct diag_md_info *ch = NULL;
  396. for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
  397. ch = &diag_md[i];
  398. kfree(ch->tbl);
  399. ch->num_tbl_entries = 0;
  400. ch->ops = NULL;
  401. }
  402. }
  403. void diag_md_mdm_exit(void)
  404. {
  405. int i;
  406. struct diag_md_info *ch = NULL;
  407. for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
  408. ch = &diag_md[i];
  409. kfree(ch->tbl);
  410. ch->num_tbl_entries = 0;
  411. ch->ops = NULL;
  412. }
  413. }