qmi_cooling.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685
  1. /* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
  14. #include <linux/module.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/thermal.h>
  17. #include <linux/err.h>
  18. #include <linux/slab.h>
  19. #include <linux/of.h>
  20. #include <soc/qcom/msm_qmi_interface.h>
  21. #include "thermal_mitigation_device_service_v01.h"
  22. #define QMI_CDEV_DRIVER "qmi-cooling-device"
  23. #define QMI_TMD_RESP_TOUT_MSEC 50
  24. #define QMI_CLIENT_NAME_LENGTH 40
/* Kind of limit a QMI cooling device imposes on the remote subsystem. */
enum qmi_device_type {
	QMI_CDEV_MAX_LIMIT_TYPE,	/* throttling: higher state = more mitigation */
	QMI_CDEV_MIN_LIMIT_TYPE,	/* floor restriction (e.g. cold-condition vdd) */
	QMI_CDEV_TYPE_NR,		/* number of device types */
};
/*
 * One DT-declared cooling device, backed by a mitigation device exported
 * by the remote TMD (Thermal Mitigation Device) QMI service.
 */
struct qmi_cooling_device {
	struct device_node *np;			/* DT node for thermal binding */
	char cdev_name[THERMAL_NAME_LENGTH];	/* name registered with thermal core */
	char qmi_name[QMI_CLIENT_NAME_LENGTH];	/* TMD mitigation device id */
	bool connection_active;			/* TMD service currently up */
	enum qmi_device_type type;		/* max- or min-limit semantics */
	struct list_head qmi_node;		/* entry in tmd->tmd_cdev_list */
	struct thermal_cooling_device *cdev;	/* NULL until first registration */
	unsigned int mtgn_state;		/* last requested/cached level */
	unsigned int max_level;			/* max level reported by service */
	struct qmi_tmd_instance *tmd;		/* owning TMD instance */
};
/*
 * One connection to a TMD QMI service instance (one per remote subsystem,
 * distinguished by inst_id from DT).
 */
struct qmi_tmd_instance {
	struct device *dev;			/* parent platform device */
	struct qmi_handle *handle;		/* QMI client handle, NULL when down */
	struct mutex mutex;			/* serializes handle use and teardown */
	struct work_struct work_svc_arrive;	/* handles QMI_SERVER_ARRIVE */
	struct work_struct work_svc_exit;	/* handles QMI_SERVER_EXIT */
	struct work_struct work_rcv_msg;	/* drains queued QMI messages */
	struct notifier_block nb;		/* service event notifier */
	uint32_t inst_id;			/* "qcom,instance-id" from DT */
	struct list_head tmd_cdev_list;		/* qmi_cooling_device members */
};
/* Maps a supported QMI mitigation device name to its limit semantics. */
struct qmi_dev_info {
	char *dev_name;
	enum qmi_device_type type;
};
/* Single-threaded workqueue draining QMI receive events for all instances. */
static struct workqueue_struct *qmi_tmd_wq;
/* Array of TMD instances parsed from DT; length is tmd_inst_cnt. */
static struct qmi_tmd_instance *tmd_instances;
static int tmd_inst_cnt;
/*
 * Whitelist of mitigation device names this driver accepts from DT
 * ("qcom,qmi-dev-name"); anything else is rejected at parse time.
 */
static struct qmi_dev_info device_clients[] = {
	{
		.dev_name = "pa",
		.type = QMI_CDEV_MAX_LIMIT_TYPE,
	},
	{
		.dev_name = "cx_vdd_limit",
		.type = QMI_CDEV_MAX_LIMIT_TYPE,
	},
	{
		.dev_name = "modem",
		.type = QMI_CDEV_MAX_LIMIT_TYPE,
	},
	{
		.dev_name = "modem_current",
		.type = QMI_CDEV_MAX_LIMIT_TYPE,
	},
	{
		.dev_name = "modem_skin",
		.type = QMI_CDEV_MAX_LIMIT_TYPE,
	},
	{
		.dev_name = "modem_bw",
		.type = QMI_CDEV_MAX_LIMIT_TYPE,
	},
	{
		.dev_name = "cpuv_restriction_cold",
		.type = QMI_CDEV_MIN_LIMIT_TYPE,
	},
	{
		.dev_name = "cpr_cold",
		.type = QMI_CDEV_MIN_LIMIT_TYPE,
	}
};
  94. static int qmi_get_max_state(struct thermal_cooling_device *cdev,
  95. unsigned long *state)
  96. {
  97. struct qmi_cooling_device *qmi_cdev = cdev->devdata;
  98. if (!qmi_cdev)
  99. return -EINVAL;
  100. *state = qmi_cdev->max_level;
  101. return 0;
  102. }
  103. static int qmi_get_cur_state(struct thermal_cooling_device *cdev,
  104. unsigned long *state)
  105. {
  106. struct qmi_cooling_device *qmi_cdev = cdev->devdata;
  107. if (!qmi_cdev)
  108. return -EINVAL;
  109. if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE) {
  110. *state = 0;
  111. return 0;
  112. }
  113. *state = qmi_cdev->mtgn_state;
  114. return 0;
  115. }
  116. static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev,
  117. uint8_t state)
  118. {
  119. int ret = 0;
  120. struct tmd_set_mitigation_level_req_msg_v01 req;
  121. struct tmd_set_mitigation_level_resp_msg_v01 tmd_resp;
  122. struct msg_desc req_desc, resp_desc;
  123. struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
  124. memset(&req, 0, sizeof(req));
  125. memset(&tmd_resp, 0, sizeof(tmd_resp));
  126. strlcpy(req.mitigation_dev_id.mitigation_dev_id, qmi_cdev->qmi_name,
  127. QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01);
  128. req.mitigation_level = state;
  129. req_desc.max_msg_len = TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN;
  130. req_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01;
  131. req_desc.ei_array = tmd_set_mitigation_level_req_msg_v01_ei;
  132. resp_desc.max_msg_len =
  133. TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN;
  134. resp_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01;
  135. resp_desc.ei_array = tmd_set_mitigation_level_resp_msg_v01_ei;
  136. mutex_lock(&tmd->mutex);
  137. ret = qmi_send_req_wait(tmd->handle,
  138. &req_desc, &req, sizeof(req),
  139. &resp_desc, &tmd_resp, sizeof(tmd_resp),
  140. QMI_TMD_RESP_TOUT_MSEC);
  141. if (ret < 0) {
  142. pr_err("qmi set state:%d failed for %s ret:%d\n",
  143. state, qmi_cdev->cdev_name, ret);
  144. goto qmi_send_exit;
  145. }
  146. if (tmd_resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  147. ret = tmd_resp.resp.result;
  148. pr_err("qmi set state:%d NOT success for %s ret:%d\n",
  149. state, qmi_cdev->cdev_name, ret);
  150. goto qmi_send_exit;
  151. }
  152. pr_debug("Requested qmi state:%d for %s\n", state, qmi_cdev->cdev_name);
  153. qmi_send_exit:
  154. mutex_unlock(&tmd->mutex);
  155. return ret;
  156. }
  157. static int qmi_set_cur_or_min_state(struct qmi_cooling_device *qmi_cdev,
  158. unsigned long state)
  159. {
  160. int ret = 0;
  161. struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
  162. if (!tmd)
  163. return -EINVAL;
  164. if (qmi_cdev->mtgn_state == state)
  165. return ret;
  166. /* save it and return if server exit */
  167. if (!qmi_cdev->connection_active) {
  168. qmi_cdev->mtgn_state = state;
  169. pr_debug("Pending request:%ld for %s\n", state,
  170. qmi_cdev->cdev_name);
  171. return ret;
  172. }
  173. /* It is best effort to save state even if QMI fail */
  174. ret = qmi_tmd_send_state_request(qmi_cdev, (uint8_t)state);
  175. qmi_cdev->mtgn_state = state;
  176. return ret;
  177. }
  178. static int qmi_set_cur_state(struct thermal_cooling_device *cdev,
  179. unsigned long state)
  180. {
  181. struct qmi_cooling_device *qmi_cdev = cdev->devdata;
  182. if (!qmi_cdev)
  183. return -EINVAL;
  184. if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE)
  185. return 0;
  186. if (state > qmi_cdev->max_level)
  187. state = qmi_cdev->max_level;
  188. return qmi_set_cur_or_min_state(qmi_cdev, state);
  189. }
  190. static int qmi_set_min_state(struct thermal_cooling_device *cdev,
  191. unsigned long state)
  192. {
  193. struct qmi_cooling_device *qmi_cdev = cdev->devdata;
  194. if (!qmi_cdev)
  195. return -EINVAL;
  196. if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE)
  197. return 0;
  198. if (state > qmi_cdev->max_level)
  199. state = qmi_cdev->max_level;
  200. /* Convert state into QMI client expects for min state */
  201. state = qmi_cdev->max_level - state;
  202. return qmi_set_cur_or_min_state(qmi_cdev, state);
  203. }
  204. static int qmi_get_min_state(struct thermal_cooling_device *cdev,
  205. unsigned long *state)
  206. {
  207. struct qmi_cooling_device *qmi_cdev = cdev->devdata;
  208. if (!qmi_cdev)
  209. return -EINVAL;
  210. if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE) {
  211. *state = 0;
  212. return 0;
  213. }
  214. *state = qmi_cdev->max_level - qmi_cdev->mtgn_state;
  215. return 0;
  216. }
/* Cooling device callbacks shared by every QMI cooling device. */
static struct thermal_cooling_device_ops qmi_device_ops = {
	.get_max_state = qmi_get_max_state,
	.get_cur_state = qmi_get_cur_state,
	.set_cur_state = qmi_set_cur_state,
	.set_min_state = qmi_set_min_state,
	.get_min_state = qmi_get_min_state,
};
  224. static int qmi_register_cooling_device(struct qmi_cooling_device *qmi_cdev)
  225. {
  226. qmi_cdev->cdev = thermal_of_cooling_device_register(
  227. qmi_cdev->np,
  228. qmi_cdev->cdev_name,
  229. qmi_cdev,
  230. &qmi_device_ops);
  231. if (IS_ERR(qmi_cdev->cdev)) {
  232. pr_err("Cooling register failed for %s, ret:%ld\n",
  233. qmi_cdev->cdev_name, PTR_ERR(qmi_cdev->cdev));
  234. return PTR_ERR(qmi_cdev->cdev);
  235. }
  236. pr_debug("Cooling register success for %s\n", qmi_cdev->cdev_name);
  237. return 0;
  238. }
/*
 * Fetch the TMD service's mitigation-device list and bind it to the
 * DT-declared cooling devices of @tmd: mark matches connected, replay any
 * cached mitigation state, and register with the thermal core on first
 * connect.
 *
 * Runs from the service-arrive work after the QMI handle is connected.
 * The instance mutex is held only around the QMI transaction; the list
 * walk below runs unlocked. Returns 0 on success, a negative errno on
 * transport failure, or the non-zero QMI result code.
 */
static int verify_devices_and_register(struct qmi_tmd_instance *tmd)
{
	struct tmd_get_mitigation_device_list_req_msg_v01 req;
	struct tmd_get_mitigation_device_list_resp_msg_v01 *tmd_resp;
	struct msg_desc req_desc, resp_desc;
	int ret = 0, i;

	memset(&req, 0, sizeof(req));
	/* size of tmd_resp is very high, use heap memory rather than stack */
	tmd_resp = kzalloc(sizeof(*tmd_resp), GFP_KERNEL);
	if (!tmd_resp)
		return -ENOMEM;

	req_desc.max_msg_len =
		TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN;
	req_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01;
	req_desc.ei_array = tmd_get_mitigation_device_list_req_msg_v01_ei;

	resp_desc.max_msg_len =
		TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN;
	resp_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01;
	resp_desc.ei_array = tmd_get_mitigation_device_list_resp_msg_v01_ei;

	mutex_lock(&tmd->mutex);
	/* timeout 0 — presumably waits indefinitely; confirm against
	 * qmi_send_req_wait() semantics.
	 */
	ret = qmi_send_req_wait(tmd->handle,
				&req_desc, &req, sizeof(req),
				&resp_desc, tmd_resp, sizeof(*tmd_resp),
				0);
	if (ret < 0) {
		pr_err("qmi get device list failed for inst_id:0x%x ret:%d\n",
		       tmd->inst_id, ret);
		goto reg_exit;
	}

	if (tmd_resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = tmd_resp->resp.result;
		pr_err("Get device list NOT success for inst_id:0x%x ret:%d\n",
		       tmd->inst_id, ret);
		goto reg_exit;
	}
	mutex_unlock(&tmd->mutex);

	/*
	 * Case-insensitive match of each advertised mitigation device
	 * against this instance's DT-declared cooling devices.
	 */
	for (i = 0; i < tmd_resp->mitigation_device_list_len; i++) {
		struct qmi_cooling_device *qmi_cdev = NULL;

		list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list,
				    qmi_node) {
			struct tmd_mitigation_dev_list_type_v01 *device =
				&tmd_resp->mitigation_device_list[i];

			if ((strncasecmp(qmi_cdev->qmi_name,
				device->mitigation_dev_id.mitigation_dev_id,
				QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01)))
				continue;

			qmi_cdev->connection_active = true;
			qmi_cdev->max_level = device->max_mitigation_level;
			/*
			 * It is better to set current state
			 * initially or during restart
			 */
			qmi_tmd_send_state_request(qmi_cdev,
						   qmi_cdev->mtgn_state);
			/* Register with the thermal core on first connect. */
			if (!qmi_cdev->cdev)
				ret = qmi_register_cooling_device(qmi_cdev);
			break;
		}
	}

	kfree(tmd_resp);
	return ret;

reg_exit:
	mutex_unlock(&tmd->mutex);
	kfree(tmd_resp);
	return ret;
}
  305. static void qmi_tmd_rcv_msg(struct work_struct *work)
  306. {
  307. int rc;
  308. struct qmi_tmd_instance *tmd = container_of(work,
  309. struct qmi_tmd_instance,
  310. work_rcv_msg);
  311. do {
  312. pr_debug("Notified about a Receive Event\n");
  313. } while ((rc = qmi_recv_msg(tmd->handle)) == 0);
  314. if (rc != -ENOMSG)
  315. pr_err("Error receiving message for SVC:0x%x, ret:%d\n",
  316. tmd->inst_id, rc);
  317. }
  318. static void qmi_tmd_clnt_notify(struct qmi_handle *handle,
  319. enum qmi_event_type event, void *priv_data)
  320. {
  321. struct qmi_tmd_instance *tmd =
  322. (struct qmi_tmd_instance *)priv_data;
  323. if (!tmd) {
  324. pr_debug("tmd is NULL\n");
  325. return;
  326. }
  327. switch (event) {
  328. case QMI_RECV_MSG:
  329. queue_work(qmi_tmd_wq, &tmd->work_rcv_msg);
  330. break;
  331. default:
  332. break;
  333. }
  334. }
  335. static void qmi_tmd_svc_arrive(struct work_struct *work)
  336. {
  337. int ret = 0;
  338. struct qmi_tmd_instance *tmd = container_of(work,
  339. struct qmi_tmd_instance,
  340. work_svc_arrive);
  341. mutex_lock(&tmd->mutex);
  342. tmd->handle = qmi_handle_create(qmi_tmd_clnt_notify, tmd);
  343. if (!tmd->handle) {
  344. pr_err("QMI TMD client handle alloc failed for 0x%x\n",
  345. tmd->inst_id);
  346. goto arrive_exit;
  347. }
  348. ret = qmi_connect_to_service(tmd->handle, TMD_SERVICE_ID_V01,
  349. TMD_SERVICE_VERS_V01,
  350. tmd->inst_id);
  351. if (ret < 0) {
  352. pr_err("Could not connect handle to service for 0x%x, ret:%d\n",
  353. tmd->inst_id, ret);
  354. qmi_handle_destroy(tmd->handle);
  355. tmd->handle = NULL;
  356. goto arrive_exit;
  357. }
  358. mutex_unlock(&tmd->mutex);
  359. verify_devices_and_register(tmd);
  360. return;
  361. arrive_exit:
  362. mutex_unlock(&tmd->mutex);
  363. }
  364. static void qmi_tmd_svc_exit(struct work_struct *work)
  365. {
  366. struct qmi_tmd_instance *tmd = container_of(work,
  367. struct qmi_tmd_instance,
  368. work_svc_exit);
  369. struct qmi_cooling_device *qmi_cdev;
  370. mutex_lock(&tmd->mutex);
  371. qmi_handle_destroy(tmd->handle);
  372. tmd->handle = NULL;
  373. list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list, qmi_node)
  374. qmi_cdev->connection_active = false;
  375. mutex_unlock(&tmd->mutex);
  376. }
  377. static int qmi_tmd_svc_event_notify(struct notifier_block *this,
  378. unsigned long event,
  379. void *data)
  380. {
  381. struct qmi_tmd_instance *tmd = container_of(this,
  382. struct qmi_tmd_instance,
  383. nb);
  384. if (!tmd) {
  385. pr_debug("tmd is NULL\n");
  386. return -EINVAL;
  387. }
  388. switch (event) {
  389. case QMI_SERVER_ARRIVE:
  390. schedule_work(&tmd->work_svc_arrive);
  391. break;
  392. case QMI_SERVER_EXIT:
  393. schedule_work(&tmd->work_svc_exit);
  394. break;
  395. default:
  396. break;
  397. }
  398. return 0;
  399. }
/*
 * Undo everything probe/parsing built: unregister cooling devices,
 * destroy QMI handles, unregister service-event notifiers and destroy
 * the workqueue. Safe against partially-initialized state (handle may be
 * NULL, notifier_call is only set for instances registered in probe,
 * tmd_inst_cnt may be zero).
 */
static void qmi_tmd_cleanup(void)
{
	int idx = 0;
	struct qmi_tmd_instance *tmd = tmd_instances;
	struct qmi_cooling_device *qmi_cdev, *c_next;

	for (; idx < tmd_inst_cnt; idx++) {
		mutex_lock(&tmd[idx].mutex);
		list_for_each_entry_safe(qmi_cdev, c_next,
					 &tmd[idx].tmd_cdev_list, qmi_node) {
			/* cdev is NULL for devices that never connected. */
			if (qmi_cdev->cdev)
				thermal_cooling_device_unregister(
					qmi_cdev->cdev);
			list_del(&qmi_cdev->qmi_node);
		}
		if (tmd[idx].handle)
			qmi_handle_destroy(tmd[idx].handle);
		if (tmd[idx].nb.notifier_call)
			qmi_svc_event_notifier_unregister(TMD_SERVICE_ID_V01,
							  TMD_SERVICE_VERS_V01,
							  tmd[idx].inst_id,
							  &tmd[idx].nb);
		mutex_unlock(&tmd[idx].mutex);
	}

	if (qmi_tmd_wq) {
		destroy_workqueue(qmi_tmd_wq);
		qmi_tmd_wq = NULL;
	}
}
  428. static int of_get_qmi_tmd_platform_data(struct device *dev)
  429. {
  430. int ret = 0, idx = 0, i = 0, subsys_cnt = 0;
  431. struct device_node *np = dev->of_node;
  432. struct device_node *subsys_np, *cdev_np;
  433. struct qmi_tmd_instance *tmd;
  434. struct qmi_cooling_device *qmi_cdev;
  435. subsys_cnt = of_get_available_child_count(np);
  436. if (!subsys_cnt) {
  437. dev_err(dev, "No child node to process\n");
  438. return -EFAULT;
  439. }
  440. tmd = devm_kcalloc(dev, subsys_cnt, sizeof(*tmd), GFP_KERNEL);
  441. if (!tmd)
  442. return -ENOMEM;
  443. for_each_available_child_of_node(np, subsys_np) {
  444. if (idx >= subsys_cnt)
  445. break;
  446. ret = of_property_read_u32(subsys_np, "qcom,instance-id",
  447. &tmd[idx].inst_id);
  448. if (ret) {
  449. dev_err(dev, "error reading qcom,insance-id. ret:%d\n",
  450. ret);
  451. return ret;
  452. }
  453. tmd[idx].dev = dev;
  454. mutex_init(&tmd[idx].mutex);
  455. INIT_LIST_HEAD(&tmd[idx].tmd_cdev_list);
  456. for_each_available_child_of_node(subsys_np, cdev_np) {
  457. const char *qmi_name;
  458. qmi_cdev = devm_kzalloc(dev, sizeof(*qmi_cdev),
  459. GFP_KERNEL);
  460. if (!qmi_cdev) {
  461. ret = -ENOMEM;
  462. return ret;
  463. }
  464. strlcpy(qmi_cdev->cdev_name, cdev_np->name,
  465. THERMAL_NAME_LENGTH);
  466. if (!of_property_read_string(cdev_np,
  467. "qcom,qmi-dev-name",
  468. &qmi_name)) {
  469. strlcpy(qmi_cdev->qmi_name, qmi_name,
  470. QMI_CLIENT_NAME_LENGTH);
  471. } else {
  472. dev_err(dev, "Fail to parse dev name for %s\n",
  473. cdev_np->name);
  474. break;
  475. }
  476. /* Check for supported qmi dev*/
  477. for (i = 0; i < ARRAY_SIZE(device_clients); i++) {
  478. if (strcmp(device_clients[i].dev_name,
  479. qmi_cdev->qmi_name) == 0)
  480. break;
  481. }
  482. if (i >= ARRAY_SIZE(device_clients)) {
  483. dev_err(dev, "Not supported dev name for %s\n",
  484. cdev_np->name);
  485. break;
  486. }
  487. qmi_cdev->type = device_clients[i].type;
  488. qmi_cdev->tmd = &tmd[idx];
  489. qmi_cdev->np = cdev_np;
  490. qmi_cdev->mtgn_state = 0;
  491. list_add(&qmi_cdev->qmi_node, &tmd[idx].tmd_cdev_list);
  492. }
  493. idx++;
  494. }
  495. tmd_instances = tmd;
  496. tmd_inst_cnt = subsys_cnt;
  497. return 0;
  498. }
  499. static int qmi_device_probe(struct platform_device *pdev)
  500. {
  501. struct device *dev = &pdev->dev;
  502. int ret = 0, idx = 0;
  503. ret = of_get_qmi_tmd_platform_data(dev);
  504. if (ret)
  505. goto probe_err;
  506. if (!tmd_instances || !tmd_inst_cnt) {
  507. dev_err(dev, "Empty tmd instances\n");
  508. return -EINVAL;
  509. }
  510. qmi_tmd_wq = create_singlethread_workqueue("qmi_tmd_wq");
  511. if (!qmi_tmd_wq) {
  512. dev_err(dev, "Failed to create single thread workqueue\n");
  513. ret = -EFAULT;
  514. goto probe_err;
  515. }
  516. for (; idx < tmd_inst_cnt; idx++) {
  517. struct qmi_tmd_instance *tmd = &tmd_instances[idx];
  518. if (list_empty(&tmd->tmd_cdev_list))
  519. continue;
  520. tmd->nb.notifier_call = qmi_tmd_svc_event_notify;
  521. INIT_WORK(&tmd->work_svc_arrive, qmi_tmd_svc_arrive);
  522. INIT_WORK(&tmd->work_svc_exit, qmi_tmd_svc_exit);
  523. INIT_WORK(&tmd->work_rcv_msg, qmi_tmd_rcv_msg);
  524. ret = qmi_svc_event_notifier_register(TMD_SERVICE_ID_V01,
  525. TMD_SERVICE_VERS_V01,
  526. tmd->inst_id,
  527. &tmd->nb);
  528. if (ret < 0) {
  529. dev_err(dev, "QMI register failed for 0x%x, ret:%d\n",
  530. tmd->inst_id, ret);
  531. goto probe_err;
  532. }
  533. }
  534. return 0;
  535. probe_err:
  536. qmi_tmd_cleanup();
  537. return ret;
  538. }
/* Platform remove: tear down all instances, cdevs and the workqueue. */
static int qmi_device_remove(struct platform_device *pdev)
{
	qmi_tmd_cleanup();
	return 0;
}
/* Device tree match table. */
static const struct of_device_id qmi_device_match[] = {
	{.compatible = "qcom,qmi_cooling_devices"},
	{}
};
  548. static struct platform_driver qmi_device_driver = {
  549. .probe = qmi_device_probe,
  550. .remove = qmi_device_remove,
  551. .driver = {
  552. .name = "QMI_CDEV_DRIVER",
  553. .owner = THIS_MODULE,
  554. .of_match_table = qmi_device_match,
  555. },
  556. };
/* Module entry point: register the platform driver. */
static int __init qmi_device_init(void)
{
	return platform_driver_register(&qmi_device_driver);
}
module_init(qmi_device_init);
/* Module exit point: unregister the platform driver. */
static void __exit qmi_device_exit(void)
{
	platform_driver_unregister(&qmi_device_driver);
}
module_exit(qmi_device_exit);
  567. MODULE_LICENSE("GPL v2");
  568. MODULE_DESCRIPTION("QTI QMI cooling device driver");