msm_memshare.c
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/notifier.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/msm_qmi_interface.h>
#include <soc/qcom/scm.h>
#include "msm_memshare.h"
#include "heap_mem_ext_v01.h"
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/ramdump.h>

/* Macros */
#define MEMSHARE_DEV_NAME "memshare"
#define MEMSHARE_CHILD_DEV_NAME "memshare_child"

static unsigned long attrs;

static struct qmi_handle *mem_share_svc_handle;
static void mem_share_svc_recv_msg(struct work_struct *work);
static DECLARE_DELAYED_WORK(work_recv_msg, mem_share_svc_recv_msg);
static struct workqueue_struct *mem_share_svc_workqueue;
static uint64_t bootup_request;
static bool ramdump_event;
static void *memshare_ramdump_dev[MAX_CLIENTS];
static struct device *memshare_dev[MAX_CLIENTS];

/* Memshare Driver Structure */
struct memshare_driver {
	struct device *dev;
	struct mutex mem_share;
	struct mutex mem_free;
	struct work_struct memshare_init_work;
};

struct memshare_child {
	struct device *dev;
};

static struct memshare_driver *memsh_drv;
static struct memshare_child *memsh_child;
static struct mem_blocks memblock[MAX_CLIENTS];
static uint32_t num_clients;

static struct msg_desc mem_share_svc_alloc_req_desc = {
	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_ALLOC_REQ_MSG_V01,
	.ei_array = mem_alloc_req_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_alloc_resp_desc = {
	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_ALLOC_RESP_MSG_V01,
	.ei_array = mem_alloc_resp_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_free_req_desc = {
	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_FREE_REQ_MSG_V01,
	.ei_array = mem_free_req_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_free_resp_desc = {
	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_FREE_RESP_MSG_V01,
	.ei_array = mem_free_resp_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_alloc_generic_req_desc = {
	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01,
	.ei_array = mem_alloc_generic_req_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_alloc_generic_resp_desc = {
	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_ALLOC_GENERIC_RESP_MSG_V01,
	.ei_array = mem_alloc_generic_resp_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_free_generic_req_desc = {
	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_FREE_GENERIC_REQ_MSG_V01,
	.ei_array = mem_free_generic_req_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_free_generic_resp_desc = {
	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_FREE_GENERIC_RESP_MSG_V01,
	.ei_array = mem_free_generic_resp_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_size_query_req_desc = {
	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_QUERY_SIZE_REQ_MSG_V01,
	.ei_array = mem_query_size_req_msg_data_v01_ei,
};

static struct msg_desc mem_share_svc_size_query_resp_desc = {
	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
	.msg_id = MEM_QUERY_SIZE_RESP_MSG_V01,
	.ei_array = mem_query_size_resp_msg_data_v01_ei,
};

/*
 * This API creates ramdump dev handlers
 * for each of the memshare clients.
 * These dev handlers will be used for
 * extracting the ramdump for loaned memory
 * segments.
 */
static int mem_share_configure_ramdump(int client)
{
	char client_name[18];
	const char *clnt = NULL;

	switch (client) {
	case 0:
		clnt = "GPS";
		break;
	case 1:
		clnt = "FTM";
		break;
	case 2:
		clnt = "DIAG";
		break;
	default:
		pr_err("memshare: no memshare clients registered\n");
		return -EINVAL;
	}
	snprintf(client_name, sizeof(client_name),
		"memshare_%s", clnt);
	if (memshare_dev[client]) {
		memshare_ramdump_dev[client] =
			create_ramdump_device(client_name,
				memshare_dev[client]);
	} else {
		pr_err("memshare:%s: invalid memshare device\n", __func__);
		return -ENODEV;
	}
	if (IS_ERR_OR_NULL(memshare_ramdump_dev[client])) {
		pr_err("memshare: %s: Unable to create memshare ramdump device\n",
			__func__);
		memshare_ramdump_dev[client] = NULL;
		return -ENOMEM;
	}
	return 0;
}
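
/*
 * Looks up the memblock slot registered for the given client_id and
 * peripheral. When no match exists and request is zero (the CHECK path
 * used by alloc and query requests), a fresh slot is claimed and a
 * ramdump device is configured for it. Returns the memblock index, or
 * DHMS_MEM_CLIENT_INVALID if no slot could be found.
 */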
static int check_client(int client_id, int proc, int request)
{
	int i = 0, rc;
	int found = DHMS_MEM_CLIENT_INVALID;

	for (i = 0; i < MAX_CLIENTS; i++) {
		if (memblock[i].client_id == client_id &&
				memblock[i].peripheral == proc) {
			found = i;
			break;
		}
	}
	if ((found == DHMS_MEM_CLIENT_INVALID) && !request) {
		pr_debug("memshare: No registered client, adding a new client\n");
		/* Add a new client */
		for (i = 0; i < MAX_CLIENTS; i++) {
			if (memblock[i].client_id == DHMS_MEM_CLIENT_INVALID) {
				memblock[i].client_id = client_id;
				memblock[i].allotted = 0;
				memblock[i].guarantee = 0;
				memblock[i].peripheral = proc;
				found = i;
				if (!memblock[i].file_created) {
					rc = mem_share_configure_ramdump(i);
					if (rc)
						pr_err("memshare: %s, Cannot create ramdump for client: %d\n",
							__func__, client_id);
					else
						memblock[i].file_created = 1;
				}
				break;
			}
		}
	}
	return found;
}
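
/* Resets the bookkeeping for a client slot after its memory is freed. */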
static void free_client(int id)
{
	memblock[id].phy_addr = 0;
	memblock[id].virtual_addr = 0;
	memblock[id].allotted = 0;
	memblock[id].guarantee = 0;
	memblock[id].sequence_id = -1;
	memblock[id].memory_type = MEMORY_CMA;
}
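
/*
 * Populates a generic alloc response from the client's memblock entry.
 * *flag is non-zero when the allocation failed, in which case the
 * response carries QMI_ERR_NO_MEMORY_V01.
 */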
static void fill_alloc_response(struct mem_alloc_generic_resp_msg_v01 *resp,
			int id, int *flag)
{
	resp->sequence_id_valid = 1;
	resp->sequence_id = memblock[id].sequence_id;
	resp->dhms_mem_alloc_addr_info_valid = 1;
	resp->dhms_mem_alloc_addr_info_len = 1;
	resp->dhms_mem_alloc_addr_info[0].phy_addr = memblock[id].phy_addr;
	resp->dhms_mem_alloc_addr_info[0].num_bytes = memblock[id].size;
	if (!*flag) {
		resp->resp.result = QMI_RESULT_SUCCESS_V01;
		resp->resp.error = QMI_ERR_NONE_V01;
	} else {
		resp->resp.result = QMI_RESULT_FAILURE_V01;
		resp->resp.error = QMI_ERR_NO_MEMORY_V01;
	}
}
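
/*
 * Marks every client slot as invalid/unallocated and requests that
 * DMA allocations for loaned regions skip the kernel mapping.
 */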
static void initialize_client(void)
{
	int i;

	for (i = 0; i < MAX_CLIENTS; i++) {
		memblock[i].allotted = 0;
		memblock[i].size = 0;
		memblock[i].guarantee = 0;
		memblock[i].phy_addr = 0;
		memblock[i].virtual_addr = 0;
		memblock[i].client_id = DHMS_MEM_CLIENT_INVALID;
		memblock[i].peripheral = -1;
		memblock[i].sequence_id = -1;
		memblock[i].memory_type = MEMORY_CMA;
		memblock[i].free_memory = 0;
		memblock[i].hyp_mapping = 0;
		memblock[i].file_created = 0;
	}
	attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
}

/*
 * mem_share_do_ramdump() initializes the ramdump segments with the
 * physical address and size of each memshared client. Extraction is
 * skipped for clients whose memory is not allotted. The ramdump API
 * is then used to extract the dump in ELF format.
 */
static int mem_share_do_ramdump(void)
{
	int i = 0, ret;
	char *client_name = NULL;
	u32 source_vmlist[1] = {VMID_MSS_MSA};
	int dest_vmids[1] = {VMID_HLOS};
	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};

	for (i = 0; i < num_clients; i++) {
		struct ramdump_segment *ramdump_segments_tmp = NULL;

		switch (i) {
		case 0:
			client_name = "GPS";
			break;
		case 1:
			client_name = "FTM";
			break;
		case 2:
			client_name = "DIAG";
			break;
		default:
			pr_err("memshare: no memshare clients registered\n");
			return -EINVAL;
		}
		if (!memblock[i].allotted) {
			pr_err("memshare: %s memblock is not allotted\n",
				client_name);
			continue;
		}
		if (memblock[i].hyp_mapping &&
			memblock[i].peripheral ==
			DHMS_MEM_PROC_MPSS_V01) {
			pr_debug("memshare: hypervisor unmapping for client id: %d\n",
				memblock[i].client_id);
			if (memblock[i].alloc_request)
				continue;
			ret = hyp_assign_phys(
					memblock[i].phy_addr,
					memblock[i].size,
					source_vmlist,
					1, dest_vmids,
					dest_perms, 1);
			if (ret) {
				/*
				 * This is an error case as the hyp
				 * mapping was successful earlier but
				 * the unmap failed.
				 */
				pr_err("memshare: %s, failed to map the region to APPS\n",
					__func__);
			} else {
				memblock[i].hyp_mapping = 0;
			}
		}
		ramdump_segments_tmp = kcalloc(1,
			sizeof(struct ramdump_segment),
			GFP_KERNEL);
		if (!ramdump_segments_tmp)
			return -ENOMEM;
		ramdump_segments_tmp[0].size = memblock[i].size;
		ramdump_segments_tmp[0].address = memblock[i].phy_addr;
		pr_debug("memshare: %s:%s client:id: %d:size = %d\n",
			__func__, client_name, i, memblock[i].size);
		ret = do_elf_ramdump(memshare_ramdump_dev[i],
			ramdump_segments_tmp, 1);
		kfree(ramdump_segments_tmp);
		if (ret < 0) {
			pr_err("memshare: Unable to dump: %d\n", ret);
			return ret;
		}
	}
	return 0;
}
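
/*
 * Subsystem (modem) restart notifier. Tracks shutdown/powerup cycles,
 * triggers ramdump collection of loaned regions when enabled, and on
 * powerup returns non-guaranteed, no-longer-requested allocations to
 * HLOS and frees them.
 */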
static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
			void *_cmd)
{
	int i, ret, size = 0;
	u32 source_vmlist[1] = {VMID_MSS_MSA};
	int dest_vmids[1] = {VMID_HLOS};
	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
	struct notif_data *notifdata = NULL;

	mutex_lock(&memsh_drv->mem_share);
	switch (code) {
	case SUBSYS_BEFORE_SHUTDOWN:
		bootup_request++;
		for (i = 0; i < MAX_CLIENTS; i++)
			memblock[i].alloc_request = 0;
		break;
	case SUBSYS_RAMDUMP_NOTIFICATION:
		ramdump_event = 1;
		break;
	case SUBSYS_BEFORE_POWERUP:
		if (_cmd) {
			notifdata = (struct notif_data *) _cmd;
		} else {
			ramdump_event = 0;
			break;
		}
		if (notifdata->enable_ramdump && ramdump_event) {
			pr_debug("memshare: %s, Ramdump collection is enabled\n",
				__func__);
			ret = mem_share_do_ramdump();
			if (ret)
				pr_err("memshare: Ramdump collection failed\n");
			ramdump_event = 0;
		}
		break;
	case SUBSYS_AFTER_POWERUP:
		pr_debug("memshare: Modem has booted up\n");
		for (i = 0; i < MAX_CLIENTS; i++) {
			size = memblock[i].size;
			if (memblock[i].free_memory > 0 &&
					bootup_request >= 2) {
				memblock[i].free_memory -= 1;
				pr_debug("memshare: free_memory count: %d for client id: %d\n",
					memblock[i].free_memory,
					memblock[i].client_id);
			}
			if (memblock[i].free_memory == 0 &&
					memblock[i].peripheral ==
					DHMS_MEM_PROC_MPSS_V01 &&
					!memblock[i].guarantee &&
					memblock[i].allotted &&
					!memblock[i].alloc_request) {
				pr_debug("memshare: hypervisor unmapping for client id: %d\n",
					memblock[i].client_id);
				if (memblock[i].hyp_mapping) {
					ret = hyp_assign_phys(
							memblock[i].phy_addr,
							memblock[i].size,
							source_vmlist,
							1, dest_vmids,
							dest_perms, 1);
					if (ret &&
						memblock[i].hyp_mapping == 1) {
						/*
						 * This is an error case as
						 * the hyp mapping was
						 * successful earlier but the
						 * unmap failed.
						 */
						pr_err("memshare: %s, failed to unmap the region\n",
							__func__);
					} else {
						memblock[i].hyp_mapping = 0;
					}
				}
				if (memblock[i].client_id == 1) {
					/*
					 * Client id 1 (diag) is allocated
					 * with 4K of guard bytes, so free
					 * the client's size plus the guard.
					 */
					size += MEMSHARE_GUARD_BYTES;
				}
				dma_free_attrs(memsh_drv->dev,
					size, memblock[i].virtual_addr,
					memblock[i].phy_addr,
					attrs);
				free_client(i);
			}
		}
		bootup_request++;
		break;
	default:
		break;
	}
	mutex_unlock(&memsh_drv->mem_share);
	return NOTIFY_DONE;
}

static struct notifier_block nb = {
	.notifier_call = modem_notifier_cb,
};
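
/*
 * Reassigns the client's allocation from HLOS to the modem (MSS MSA)
 * VM with read/write permissions and records the hyp mapping state.
 */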
static void shared_hyp_mapping(int client_id)
{
	int ret;
	u32 source_vmlist[1] = {VMID_HLOS};
	int dest_vmids[1] = {VMID_MSS_MSA};
	int dest_perms[1] = {PERM_READ|PERM_WRITE};

	if (client_id == DHMS_MEM_CLIENT_INVALID) {
		pr_err("memshare: %s, Invalid Client\n", __func__);
		return;
	}
	ret = hyp_assign_phys(memblock[client_id].phy_addr,
			memblock[client_id].size,
			source_vmlist, 1, dest_vmids,
			dest_perms, 1);
	if (ret != 0) {
		pr_err("memshare: hyp_assign_phys failed size=%u err=%d\n",
			memblock[client_id].size, ret);
		return;
	}
	memblock[client_id].hyp_mapping = 1;
}
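
/*
 * Handler for the legacy (GPS-only) alloc request. Allocates the GPS
 * block when none is recorded and responds with its physical address
 * as the handle.
 */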
static int handle_alloc_req(void *req_h, void *req, void *conn_h)
{
	struct mem_alloc_req_msg_v01 *alloc_req;
	struct mem_alloc_resp_msg_v01 alloc_resp;
	int rc = 0;

	mutex_lock(&memsh_drv->mem_share);
	alloc_req = (struct mem_alloc_req_msg_v01 *)req;
	pr_debug("memshare: %s: Received Alloc Request: alloc_req->num_bytes = %d\n",
		__func__, alloc_req->num_bytes);
	if (!memblock[GPS].size) {
		memset(&alloc_resp, 0, sizeof(alloc_resp));
		alloc_resp.resp = QMI_RESULT_FAILURE_V01;
		rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes,
					&memblock[GPS]);
	}
	alloc_resp.num_bytes_valid = 1;
	alloc_resp.num_bytes = alloc_req->num_bytes;
	alloc_resp.handle_valid = 1;
	alloc_resp.handle = memblock[GPS].phy_addr;
	if (rc) {
		alloc_resp.resp = QMI_RESULT_FAILURE_V01;
		memblock[GPS].size = 0;
	} else {
		alloc_resp.resp = QMI_RESULT_SUCCESS_V01;
	}
	mutex_unlock(&memsh_drv->mem_share);

	pr_debug("memshare: %s, alloc_resp.num_bytes :%d, alloc_resp.resp :%lx\n",
		__func__, alloc_resp.num_bytes,
		(unsigned long int)alloc_resp.resp);
	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
			&mem_share_svc_alloc_resp_desc, &alloc_resp,
			sizeof(alloc_resp));
	if (rc < 0)
		pr_err("memshare: %s, Error sending the alloc request: %d\n",
			__func__, rc);
	return rc;
}
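
/*
 * Handler for the generic alloc request. Looks up (or registers) the
 * client, performs a one-time DMA allocation with an extra 4K guard
 * for the diag client (id 1), hyp-maps the region to the modem, and
 * responds with the physical address and size.
 */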
static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h)
{
	struct mem_alloc_generic_req_msg_v01 *alloc_req;
	struct mem_alloc_generic_resp_msg_v01 *alloc_resp;
	int rc, resp = 0;
	int client_id;
	uint32_t size = 0;

	mutex_lock(&memsh_drv->mem_share);
	alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req;
	pr_debug("memshare: alloc request client id: %d proc_id: %d\n",
		alloc_req->client_id, alloc_req->proc_id);
	alloc_resp = kzalloc(sizeof(*alloc_resp),
				GFP_KERNEL);
	if (!alloc_resp) {
		mutex_unlock(&memsh_drv->mem_share);
		return -ENOMEM;
	}
	alloc_resp->resp.result = QMI_RESULT_FAILURE_V01;
	alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01;
	client_id = check_client(alloc_req->client_id, alloc_req->proc_id,
				CHECK);

	if (client_id >= MAX_CLIENTS) {
		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
			__func__, alloc_req->client_id,
			alloc_req->proc_id);
		kfree(alloc_resp);
		alloc_resp = NULL;
		mutex_unlock(&memsh_drv->mem_share);
		return -EINVAL;
	}

	if (!memblock[client_id].allotted) {
		if (alloc_req->client_id == 1 && alloc_req->num_bytes > 0)
			size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES;
		else
			size = alloc_req->num_bytes;
		rc = memshare_alloc(memsh_drv->dev, size,
					&memblock[client_id]);
		if (rc) {
			pr_err("memshare: %s, Unable to allocate memory for requested client\n",
				__func__);
			resp = 1;
		}
		if (!resp) {
			memblock[client_id].free_memory += 1;
			memblock[client_id].allotted = 1;
			memblock[client_id].size = alloc_req->num_bytes;
			memblock[client_id].peripheral = alloc_req->proc_id;
		}
	}
	pr_debug("memshare: In %s, free memory count for client id: %d = %d",
		__func__, memblock[client_id].client_id,
		memblock[client_id].free_memory);

	memblock[client_id].sequence_id = alloc_req->sequence_id;
	memblock[client_id].alloc_request = 1;

	fill_alloc_response(alloc_resp, client_id, &resp);
	/*
	 * Perform the hypervisor mapping in order to avoid an XPU
	 * violation on the allocated region for modem clients.
	 */
	if (!memblock[client_id].hyp_mapping &&
		memblock[client_id].allotted)
		shared_hyp_mapping(client_id);
	mutex_unlock(&memsh_drv->mem_share);
	pr_debug("memshare: alloc_resp.num_bytes :%d, alloc_resp.resp.result :%lx\n",
		alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes,
		(unsigned long int)alloc_resp->resp.result);
	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
			&mem_share_svc_alloc_generic_resp_desc, alloc_resp,
			sizeof(*alloc_resp));
	if (rc < 0)
		pr_err("memshare: %s, Error sending the alloc request: %d\n",
			__func__, rc);
	kfree(alloc_resp);
	alloc_resp = NULL;
	return rc;
}
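
/*
 * Handler for the legacy (GPS-only) free request. Frees the GPS block
 * unless it is a guaranteed (boot-time) allocation.
 */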
static int handle_free_req(void *req_h, void *req, void *conn_h)
{
	struct mem_free_req_msg_v01 *free_req;
	struct mem_free_resp_msg_v01 free_resp;
	int rc;

	mutex_lock(&memsh_drv->mem_free);
	if (!memblock[GPS].guarantee) {
		free_req = (struct mem_free_req_msg_v01 *)req;
		pr_debug("memshare: %s: Received Free Request\n", __func__);
		memset(&free_resp, 0, sizeof(free_resp));
		dma_free_coherent(memsh_drv->dev, memblock[GPS].size,
			memblock[GPS].virtual_addr,
			free_req->handle);
	}
	free_resp.resp = QMI_RESULT_SUCCESS_V01;
	mutex_unlock(&memsh_drv->mem_free);
	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
			&mem_share_svc_free_resp_desc, &free_resp,
			sizeof(free_resp));
	if (rc < 0)
		pr_err("memshare: %s, Error sending the free request: %d\n",
			__func__, rc);
	return rc;
}
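
/*
 * Handler for the generic free request. For non-guaranteed, allotted
 * clients the region is reassigned back to HLOS, freed (including the
 * diag guard bytes), and the client slot is reset.
 */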
static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
{
	struct mem_free_generic_req_msg_v01 *free_req;
	struct mem_free_generic_resp_msg_v01 free_resp;
	int rc, flag = 0, ret = 0, size = 0;
	uint32_t client_id;
	u32 source_vmlist[1] = {VMID_MSS_MSA};
	int dest_vmids[1] = {VMID_HLOS};
	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};

	mutex_lock(&memsh_drv->mem_free);
	free_req = (struct mem_free_generic_req_msg_v01 *)req;
	pr_debug("memshare: %s: Received Free Request\n", __func__);
	memset(&free_resp, 0, sizeof(free_resp));
	free_resp.resp.error = QMI_ERR_INTERNAL_V01;
	free_resp.resp.result = QMI_RESULT_FAILURE_V01;
	pr_debug("memshare: Client id: %d proc id: %d\n", free_req->client_id,
		free_req->proc_id);
	client_id = check_client(free_req->client_id, free_req->proc_id, FREE);
	if (client_id == DHMS_MEM_CLIENT_INVALID) {
		pr_err("memshare: %s, Invalid client request to free memory\n",
			__func__);
		flag = 1;
	} else if (!memblock[client_id].guarantee &&
				memblock[client_id].allotted) {
		pr_debug("memshare: %s: size: %d",
			__func__, memblock[client_id].size);
		ret = hyp_assign_phys(memblock[client_id].phy_addr,
				memblock[client_id].size, source_vmlist, 1,
				dest_vmids, dest_perms, 1);
		if (ret && memblock[client_id].hyp_mapping == 1) {
			/*
			 * This is an error case as the hyp mapping was
			 * successful earlier but the unmap failed.
			 */
			pr_err("memshare: %s, failed to unmap the region\n",
				__func__);
		}
		size = memblock[client_id].size;
		if (memblock[client_id].client_id == 1) {
			/*
			 * Client id 1 (diag) is allocated with 4K of
			 * guard bytes, so free the client's size plus
			 * the guard.
			 */
			size += MEMSHARE_GUARD_BYTES;
		}
		dma_free_attrs(memsh_drv->dev, size,
			memblock[client_id].virtual_addr,
			memblock[client_id].phy_addr,
			attrs);
		free_client(client_id);
	} else {
		pr_err("memshare: %s, Request came for a guaranteed client, cannot free up the memory\n",
			__func__);
	}

	if (flag) {
		free_resp.resp.result = QMI_RESULT_FAILURE_V01;
		free_resp.resp.error = QMI_ERR_INVALID_ID_V01;
	} else {
		free_resp.resp.result = QMI_RESULT_SUCCESS_V01;
		free_resp.resp.error = QMI_ERR_NONE_V01;
	}
	mutex_unlock(&memsh_drv->mem_free);
	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
			&mem_share_svc_free_generic_resp_desc, &free_resp,
			sizeof(free_resp));
	if (rc < 0)
		pr_err("memshare: %s, Error sending the free request: %d\n",
			__func__, rc);
	return rc;
}
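
/*
 * Handler for the size query request. Reports the size recorded for
 * the client (0 when nothing is allotted).
 */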
static int handle_query_size_req(void *req_h, void *req, void *conn_h)
{
	int rc, client_id;
	struct mem_query_size_req_msg_v01 *query_req;
	struct mem_query_size_rsp_msg_v01 *query_resp;

	mutex_lock(&memsh_drv->mem_share);
	query_req = (struct mem_query_size_req_msg_v01 *)req;
	query_resp = kzalloc(sizeof(*query_resp),
				GFP_KERNEL);
	if (!query_resp) {
		mutex_unlock(&memsh_drv->mem_share);
		return -ENOMEM;
	}
	pr_debug("memshare: query request client id: %d proc_id: %d\n",
		query_req->client_id, query_req->proc_id);
	client_id = check_client(query_req->client_id, query_req->proc_id,
				CHECK);

	if (client_id >= MAX_CLIENTS) {
		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
			__func__, query_req->client_id,
			query_req->proc_id);
		kfree(query_resp);
		query_resp = NULL;
		mutex_unlock(&memsh_drv->mem_share);
		return -EINVAL;
	}

	query_resp->size_valid = 1;
	query_resp->size = memblock[client_id].size;
	query_resp->resp.result = QMI_RESULT_SUCCESS_V01;
	query_resp->resp.error = QMI_ERR_NONE_V01;
	mutex_unlock(&memsh_drv->mem_share);

	pr_debug("memshare: query_resp.size :%d, query_resp.resp.result :%lx\n",
		query_resp->size,
		(unsigned long int)query_resp->resp.result);
	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
			&mem_share_svc_size_query_resp_desc, query_resp,
			sizeof(*query_resp));
	if (rc < 0)
		pr_err("memshare: %s, Error sending the query request: %d\n",
			__func__, rc);
	kfree(query_resp);
	query_resp = NULL;
	return rc;
}

static int mem_share_svc_connect_cb(struct qmi_handle *handle,
			void *conn_h)
{
	if (mem_share_svc_handle != handle || !conn_h)
		return -EINVAL;
	return 0;
}

static int mem_share_svc_disconnect_cb(struct qmi_handle *handle,
			void *conn_h)
{
	if (mem_share_svc_handle != handle || !conn_h)
		return -EINVAL;
	return 0;
}

static int mem_share_svc_req_desc_cb(unsigned int msg_id,
			struct msg_desc **req_desc)
{
	int rc;

	pr_debug("memshare: %s\n", __func__);
	switch (msg_id) {
	case MEM_ALLOC_REQ_MSG_V01:
		*req_desc = &mem_share_svc_alloc_req_desc;
		rc = sizeof(struct mem_alloc_req_msg_v01);
		break;
	case MEM_FREE_REQ_MSG_V01:
		*req_desc = &mem_share_svc_free_req_desc;
		rc = sizeof(struct mem_free_req_msg_v01);
		break;
	case MEM_ALLOC_GENERIC_REQ_MSG_V01:
		*req_desc = &mem_share_svc_alloc_generic_req_desc;
		rc = sizeof(struct mem_alloc_generic_req_msg_v01);
		break;
	case MEM_FREE_GENERIC_REQ_MSG_V01:
		*req_desc = &mem_share_svc_free_generic_req_desc;
		rc = sizeof(struct mem_free_generic_req_msg_v01);
		break;
	case MEM_QUERY_SIZE_REQ_MSG_V01:
		*req_desc = &mem_share_svc_size_query_req_desc;
		rc = sizeof(struct mem_query_size_req_msg_v01);
		break;
	default:
		rc = -ENOTSUPP;
		break;
	}
	return rc;
}

static int mem_share_svc_req_cb(struct qmi_handle *handle, void *conn_h,
			void *req_h, unsigned int msg_id, void *req)
{
	int rc;

	pr_debug("memshare: %s\n", __func__);
	if (mem_share_svc_handle != handle || !conn_h)
		return -EINVAL;

	switch (msg_id) {
	case MEM_ALLOC_REQ_MSG_V01:
		rc = handle_alloc_req(req_h, req, conn_h);
		break;
	case MEM_FREE_REQ_MSG_V01:
		rc = handle_free_req(req_h, req, conn_h);
		break;
	case MEM_ALLOC_GENERIC_REQ_MSG_V01:
		rc = handle_alloc_generic_req(req_h, req, conn_h);
		break;
	case MEM_FREE_GENERIC_REQ_MSG_V01:
		rc = handle_free_generic_req(req_h, req, conn_h);
		break;
	case MEM_QUERY_SIZE_REQ_MSG_V01:
		rc = handle_query_size_req(req_h, req, conn_h);
		break;
	default:
		rc = -ENOTSUPP;
		break;
	}
	return rc;
}
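
/*
 * Work function that drains all pending QMI messages for the service
 * until qmi_recv_msg() reports no more (-ENOMSG).
 */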
static void mem_share_svc_recv_msg(struct work_struct *work)
{
	int rc;

	pr_debug("memshare: %s\n", __func__);
	do {
		rc = qmi_recv_msg(mem_share_svc_handle);
		pr_debug("memshare: %s: Notified about a Receive Event",
			__func__);
	} while (!rc);
	if (rc != -ENOMSG)
		pr_err("memshare: %s: Error = %d while receiving message\n",
			__func__, rc);
}

static void qmi_mem_share_svc_ntfy(struct qmi_handle *handle,
		enum qmi_event_type event, void *priv)
{
	pr_debug("memshare: %s\n", __func__);
	if (event == QMI_RECV_MSG)
		queue_delayed_work(mem_share_svc_workqueue,
				&work_recv_msg, 0);
}

static struct qmi_svc_ops_options mem_share_svc_ops_options = {
	.version = 1,
	.service_id = MEM_SHARE_SERVICE_SVC_ID,
	.service_vers = MEM_SHARE_SERVICE_VERS,
	.service_ins = MEM_SHARE_SERVICE_INS_ID,
	.connect_cb = mem_share_svc_connect_cb,
	.disconnect_cb = mem_share_svc_disconnect_cb,
	.req_desc_cb = mem_share_svc_req_desc_cb,
	.req_cb = mem_share_svc_req_cb,
};
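
/*
 * Allocates a DMA-coherent block of block_size bytes (with
 * DMA_ATTR_NO_KERNEL_MAPPING set via attrs) and records the virtual
 * and physical addresses in the given mem_blocks entry.
 */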
int memshare_alloc(struct device *dev,
			unsigned int block_size,
			struct mem_blocks *pblk)
{
	pr_debug("memshare: %s", __func__);
	if (!pblk) {
		pr_err("memshare: %s: Failed memory block allocation\n",
			__func__);
		return -ENOMEM;
	}
	pblk->virtual_addr = dma_alloc_attrs(dev, block_size,
						&pblk->phy_addr, GFP_KERNEL,
						attrs);
	if (pblk->virtual_addr == NULL)
		return -ENOMEM;
	return 0;
}

static void memshare_init_worker(struct work_struct *work)
{
	int rc;

	mem_share_svc_workqueue =
		create_singlethread_workqueue("mem_share_svc");
	if (!mem_share_svc_workqueue)
		return;
	mem_share_svc_handle = qmi_handle_create(qmi_mem_share_svc_ntfy, NULL);
	if (!mem_share_svc_handle) {
		pr_err("memshare: %s: Creating mem_share_svc qmi handle failed\n",
			__func__);
		destroy_workqueue(mem_share_svc_workqueue);
		return;
	}
	rc = qmi_svc_register(mem_share_svc_handle, &mem_share_svc_ops_options);
	if (rc < 0) {
		pr_err("memshare: %s: Registering mem share svc failed %d\n",
			__func__, rc);
		qmi_handle_destroy(mem_share_svc_handle);
		destroy_workqueue(mem_share_svc_workqueue);
		return;
	}
	pr_debug("memshare: memshare_init successful\n");
}
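
/*
 * Probe for a "qcom,memshare-peripheral" child node. Reads the client
 * size, id, and peripheral label from device tree, allocates and
 * hyp-maps guaranteed (boot-time) clients, and sets up their ramdump
 * device.
 */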
static int memshare_child_probe(struct platform_device *pdev)
{
	int rc;
	uint32_t size, client_id;
	const char *name;
	struct memshare_child *drv;

	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_child),
				GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->dev = &pdev->dev;
	memsh_child = drv;
	platform_set_drvdata(pdev, memsh_child);

	rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size",
					&size);
	if (rc) {
		pr_err("memshare: %s, Error reading size of clients, rc: %d\n",
			__func__, rc);
		return rc;
	}

	rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id",
					&client_id);
	if (rc) {
		pr_err("memshare: %s, Error reading client id, rc: %d\n",
			__func__, rc);
		return rc;
	}

	memblock[num_clients].guarantee = of_property_read_bool(
						pdev->dev.of_node,
						"qcom,allocate-boot-time");

	rc = of_property_read_string(pdev->dev.of_node, "label",
					&name);
	if (rc) {
		pr_err("memshare: %s, Error reading peripheral info for client, rc: %d\n",
			__func__, rc);
		return rc;
	}

	if (strcmp(name, "modem") == 0)
		memblock[num_clients].peripheral = DHMS_MEM_PROC_MPSS_V01;
	else if (strcmp(name, "adsp") == 0)
		memblock[num_clients].peripheral = DHMS_MEM_PROC_ADSP_V01;
	else if (strcmp(name, "wcnss") == 0)
		memblock[num_clients].peripheral = DHMS_MEM_PROC_WCNSS_V01;

	memblock[num_clients].size = size;
	memblock[num_clients].client_id = client_id;

	/*
	 * Boot-time memshare allocation for guaranteed clients.
	 */
	if (memblock[num_clients].guarantee && size > 0) {
		if (client_id == 1)
			size += MEMSHARE_GUARD_BYTES;
		rc = memshare_alloc(memsh_child->dev,
				size,
				&memblock[num_clients]);
		if (rc) {
			pr_err("memshare: %s, Unable to allocate memory for guaranteed clients, rc: %d\n",
				__func__, rc);
			return rc;
		}
		memblock[num_clients].allotted = 1;
		shared_hyp_mapping(num_clients);
	}

	/*
	 * Create the ramdump device handle for this memshare client.
	 */
	memshare_dev[num_clients] = &pdev->dev;
	if (!memblock[num_clients].file_created) {
		rc = mem_share_configure_ramdump(num_clients);
		if (rc)
			pr_err("memshare: %s, cannot collect dumps for client id: %d\n",
				__func__,
				memblock[num_clients].client_id);
		else
			memblock[num_clients].file_created = 1;
	}

	num_clients++;
	return 0;
}
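
/*
 * Probe for the "qcom,memshare" parent node. Initializes locks and
 * client state, schedules QMI service registration on a worker,
 * populates the child peripheral devices, and registers for modem
 * subsystem restart notifications.
 */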
static int memshare_probe(struct platform_device *pdev)
{
	int rc;
	struct memshare_driver *drv;

	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_driver),
				GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	/* Memory allocation has been done successfully */
	mutex_init(&drv->mem_free);
	mutex_init(&drv->mem_share);

	INIT_WORK(&drv->memshare_init_work, memshare_init_worker);
	schedule_work(&drv->memshare_init_work);

	drv->dev = &pdev->dev;
	memsh_drv = drv;
	platform_set_drvdata(pdev, memsh_drv);
	initialize_client();
	num_clients = 0;

	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL,
				&pdev->dev);
	if (rc) {
		pr_err("memshare: %s, error populating the devices\n",
			__func__);
		return rc;
	}

	subsys_notif_register_notifier("modem", &nb);
	pr_debug("memshare: %s, Memshare inited\n", __func__);
	return 0;
}

static int memshare_remove(struct platform_device *pdev)
{
	if (!memsh_drv)
		return 0;

	qmi_svc_unregister(mem_share_svc_handle);
	flush_workqueue(mem_share_svc_workqueue);
	qmi_handle_destroy(mem_share_svc_handle);
	destroy_workqueue(mem_share_svc_workqueue);

	return 0;
}

static int memshare_child_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id memshare_match_table[] = {
	{
		.compatible = "qcom,memshare",
	},
	{}
};

static const struct of_device_id memshare_match_table1[] = {
	{
		.compatible = "qcom,memshare-peripheral",
	},
	{}
};

static struct platform_driver memshare_pdriver = {
	.probe = memshare_probe,
	.remove = memshare_remove,
	.driver = {
		.name = MEMSHARE_DEV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = memshare_match_table,
	},
};

static struct platform_driver memshare_pchild = {
	.probe = memshare_child_probe,
	.remove = memshare_child_remove,
	.driver = {
		.name = MEMSHARE_CHILD_DEV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = memshare_match_table1,
	},
};

module_platform_driver(memshare_pdriver);
module_platform_driver(memshare_pchild);

MODULE_DESCRIPTION("Mem Share QMI Service Driver");
MODULE_LICENSE("GPL v2");