diagfwd_cntl.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686
  1. /* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/slab.h>
  13. #include <linux/diagchar.h>
  14. #include <linux/kmemleak.h>
  15. #include <linux/delay.h>
  16. #include "diagchar.h"
  17. #include "diagfwd.h"
  18. #include "diagfwd_cntl.h"
  19. #include "diagfwd_peripheral.h"
  20. #include "diagfwd_bridge.h"
  21. #include "diag_dci.h"
  22. #include "diagmem.h"
  23. #include "diag_masks.h"
  24. #include "diag_ipc_logging.h"
  25. #include "diag_mux.h"
  26. #define FEATURE_SUPPORTED(x) ((feature_mask << (i * 8)) & (1 << x))
  27. /* tracks which peripheral is undergoing SSR */
  28. static uint16_t reg_dirty;
  29. static uint8_t diag_id = DIAG_ID_APPS;
  30. static void diag_notify_md_client(uint8_t peripheral, int data);
  31. static void diag_mask_update_work_fn(struct work_struct *work)
  32. {
  33. uint8_t peripheral;
  34. for (peripheral = 0; peripheral <= NUM_PERIPHERALS; peripheral++) {
  35. if (!(driver->mask_update & PERIPHERAL_MASK(peripheral)))
  36. continue;
  37. diag_send_updates_peripheral(peripheral);
  38. }
  39. }
  40. void diag_cntl_channel_open(struct diagfwd_info *p_info)
  41. {
  42. if (!p_info)
  43. return;
  44. driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
  45. queue_work(driver->cntl_wq, &driver->mask_update_work);
  46. diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
  47. }
  48. void diag_cntl_channel_close(struct diagfwd_info *p_info)
  49. {
  50. uint8_t peripheral;
  51. if (!p_info)
  52. return;
  53. peripheral = p_info->peripheral;
  54. if (peripheral >= NUM_PERIPHERALS)
  55. return;
  56. driver->feature[peripheral].sent_feature_mask = 0;
  57. driver->feature[peripheral].rcvd_feature_mask = 0;
  58. reg_dirty |= PERIPHERAL_MASK(peripheral);
  59. diag_cmd_remove_reg_by_proc(peripheral);
  60. driver->diag_id_sent[peripheral] = 0;
  61. driver->feature[peripheral].stm_support = DISABLE_STM;
  62. driver->feature[peripheral].log_on_demand = 0;
  63. driver->stm_state[peripheral] = DISABLE_STM;
  64. driver->stm_state_requested[peripheral] = DISABLE_STM;
  65. reg_dirty ^= PERIPHERAL_MASK(peripheral);
  66. diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
  67. }
  68. static void diag_stm_update_work_fn(struct work_struct *work)
  69. {
  70. uint8_t i;
  71. uint16_t peripheral_mask = 0;
  72. int err = 0;
  73. mutex_lock(&driver->cntl_lock);
  74. peripheral_mask = driver->stm_peripheral;
  75. driver->stm_peripheral = 0;
  76. mutex_unlock(&driver->cntl_lock);
  77. if (peripheral_mask == 0)
  78. return;
  79. for (i = 0; i < NUM_PERIPHERALS; i++) {
  80. if (!driver->feature[i].stm_support)
  81. continue;
  82. if (peripheral_mask & PERIPHERAL_MASK(i)) {
  83. err = diag_send_stm_state(i,
  84. (uint8_t)(driver->stm_state_requested[i]));
  85. if (!err) {
  86. driver->stm_state[i] =
  87. driver->stm_state_requested[i];
  88. }
  89. }
  90. }
  91. }
  92. void diag_notify_md_client(uint8_t peripheral, int data)
  93. {
  94. int stat = 0;
  95. struct siginfo info;
  96. struct pid *pid_struct;
  97. struct task_struct *result;
  98. if (peripheral > NUM_PERIPHERALS)
  99. return;
  100. if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
  101. return;
  102. mutex_lock(&driver->md_session_lock);
  103. memset(&info, 0, sizeof(struct siginfo));
  104. info.si_code = SI_QUEUE;
  105. info.si_int = (PERIPHERAL_MASK(peripheral) | data);
  106. info.si_signo = SIGCONT;
  107. if (!driver->md_session_map[peripheral] ||
  108. driver->md_session_map[peripheral]->pid <= 0) {
  109. pr_err("diag: md_session_map[%d] is invalid\n", peripheral);
  110. mutex_unlock(&driver->md_session_lock);
  111. return;
  112. }
  113. pid_struct = find_get_pid(
  114. driver->md_session_map[peripheral]->pid);
  115. DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
  116. "md_session_map[%d] pid = %d task = %pK\n",
  117. peripheral,
  118. driver->md_session_map[peripheral]->pid,
  119. driver->md_session_map[peripheral]->task);
  120. if (pid_struct) {
  121. result = get_pid_task(pid_struct, PIDTYPE_PID);
  122. if (!result) {
  123. DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
  124. "diag: md_session_map[%d] with pid = %d Exited..\n",
  125. peripheral,
  126. driver->md_session_map[peripheral]->pid);
  127. mutex_unlock(&driver->md_session_lock);
  128. return;
  129. }
  130. if (driver->md_session_map[peripheral] &&
  131. driver->md_session_map[peripheral]->task == result) {
  132. stat = send_sig_info(info.si_signo,
  133. &info, result);
  134. if (stat)
  135. pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
  136. info.si_int, stat);
  137. } else
  138. pr_err("diag: md_session_map[%d] data is corrupted, signal data: 0x%x, stat: %d\n",
  139. peripheral, info.si_int, stat);
  140. }
  141. mutex_unlock(&driver->md_session_lock);
  142. }
  143. static void process_pd_status(uint8_t *buf, uint32_t len,
  144. uint8_t peripheral)
  145. {
  146. struct diag_ctrl_msg_pd_status *pd_msg = NULL;
  147. uint32_t pd;
  148. int status = DIAG_STATUS_CLOSED;
  149. if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
  150. return;
  151. pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
  152. pd = pd_msg->pd_id;
  153. status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED;
  154. diag_notify_md_client(peripheral, status);
  155. }
  156. static void enable_stm_feature(uint8_t peripheral)
  157. {
  158. if (peripheral >= NUM_PERIPHERALS)
  159. return;
  160. mutex_lock(&driver->cntl_lock);
  161. driver->feature[peripheral].stm_support = ENABLE_STM;
  162. driver->stm_peripheral |= PERIPHERAL_MASK(peripheral);
  163. mutex_unlock(&driver->cntl_lock);
  164. queue_work(driver->cntl_wq, &(driver->stm_update_work));
  165. }
  166. static void enable_socket_feature(uint8_t peripheral)
  167. {
  168. if (peripheral >= NUM_PERIPHERALS)
  169. return;
  170. if (driver->supports_sockets)
  171. driver->feature[peripheral].sockets_enabled = 1;
  172. else
  173. driver->feature[peripheral].sockets_enabled = 0;
  174. }
  175. static void process_hdlc_encoding_feature(uint8_t peripheral)
  176. {
  177. if (peripheral >= NUM_PERIPHERALS)
  178. return;
  179. if (driver->supports_apps_hdlc_encoding) {
  180. driver->feature[peripheral].encode_hdlc =
  181. ENABLE_APPS_HDLC_ENCODING;
  182. } else {
  183. driver->feature[peripheral].encode_hdlc =
  184. DISABLE_APPS_HDLC_ENCODING;
  185. }
  186. }
  187. static void process_upd_header_untagging_feature(uint8_t peripheral)
  188. {
  189. if (peripheral >= NUM_PERIPHERALS)
  190. return;
  191. if (driver->supports_apps_header_untagging) {
  192. driver->feature[peripheral].untag_header =
  193. ENABLE_PKT_HEADER_UNTAGGING;
  194. } else {
  195. driver->feature[peripheral].untag_header =
  196. DISABLE_PKT_HEADER_UNTAGGING;
  197. }
  198. }
/*
 * Parse a command deregistration control packet from a peripheral and
 * remove each advertised command-code range from the registration table.
 *
 * @buf:        raw control packet, starting at the dereg header
 * @len:        advertised payload length; excludes the pkt_id and length
 *              fields (see read_len accounting below)
 * @peripheral: source peripheral index
 */
static void process_command_deregistration(uint8_t *buf, uint32_t len,
		uint8_t peripheral)
{
	uint8_t *ptr = buf;
	int i;
	int header_len = sizeof(struct diag_ctrl_cmd_dereg);
	int read_len = 0;
	struct diag_ctrl_cmd_dereg *dereg = NULL;
	struct cmd_code_range *range = NULL;
	struct diag_cmd_reg_entry_t del_entry;

	/*
	 * Perform Basic sanity. The len field is the size of the data payload.
	 * This doesn't include the header size.
	 */
	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
		return;

	dereg = (struct diag_ctrl_cmd_dereg *)ptr;
	ptr += header_len;
	/* Don't account for pkt_id and length */
	read_len += header_len - (2 * sizeof(uint32_t));

	if (dereg->count_entries == 0) {
		pr_debug("diag: In %s, received reg tbl with no entries\n",
			__func__);
		return;
	}

	/* Walk the ranges, stopping when the advertised length is consumed */
	for (i = 0; i < dereg->count_entries && read_len < len; i++) {
		range = (struct cmd_code_range *)ptr;
		/*
		 * NOTE(review): dereg entries advance by one uint32_t less
		 * than sizeof(struct cmd_code_range); presumably the dereg
		 * wire format omits the trailing field — confirm against the
		 * control packet definition.
		 */
		ptr += sizeof(struct cmd_code_range) - sizeof(uint32_t);
		read_len += sizeof(struct cmd_code_range) - sizeof(uint32_t);
		del_entry.cmd_code = dereg->cmd_code;
		del_entry.subsys_id = dereg->subsysid;
		del_entry.cmd_code_hi = range->cmd_code_hi;
		del_entry.cmd_code_lo = range->cmd_code_lo;
		diag_cmd_remove_reg(&del_entry, peripheral);
	}

	/* The loop may end early if len ran out before count_entries did */
	if (i != dereg->count_entries) {
		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
			__func__, read_len, len, dereg->count_entries);
	}
}
/*
 * Parse a command registration control packet from a peripheral and add
 * each advertised command-code range to the registration table.
 *
 * @buf:        raw control packet, starting at the registration header
 * @len:        advertised payload length; excludes the pkt_id and length
 *              fields (see read_len accounting below)
 * @peripheral: source peripheral index
 */
static void process_command_registration(uint8_t *buf, uint32_t len,
		uint8_t peripheral)
{
	uint8_t *ptr = buf;
	int i;
	int header_len = sizeof(struct diag_ctrl_cmd_reg);
	int read_len = 0;
	struct diag_ctrl_cmd_reg *reg = NULL;
	struct cmd_code_range *range = NULL;
	struct diag_cmd_reg_entry_t new_entry;

	/*
	 * Perform Basic sanity. The len field is the size of the data payload.
	 * This doesn't include the header size.
	 */
	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
		return;

	reg = (struct diag_ctrl_cmd_reg *)ptr;
	ptr += header_len;
	/* Don't account for pkt_id and length */
	read_len += header_len - (2 * sizeof(uint32_t));

	if (reg->count_entries == 0) {
		pr_debug("diag: In %s, received reg tbl with no entries\n",
			__func__);
		return;
	}

	/* Walk the ranges, stopping when the advertised length is consumed */
	for (i = 0; i < reg->count_entries && read_len < len; i++) {
		range = (struct cmd_code_range *)ptr;
		ptr += sizeof(struct cmd_code_range);
		read_len += sizeof(struct cmd_code_range);
		new_entry.cmd_code = reg->cmd_code;
		new_entry.subsys_id = reg->subsysid;
		new_entry.cmd_code_hi = range->cmd_code_hi;
		new_entry.cmd_code_lo = range->cmd_code_lo;
		diag_cmd_add_reg(&new_entry, peripheral, INVALID_PID);
	}

	/* The loop may end early if len ran out before count_entries did */
	if (i != reg->count_entries) {
		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
			__func__, read_len, len, reg->count_entries);
	}
}
  279. static void diag_close_transport_work_fn(struct work_struct *work)
  280. {
  281. uint8_t transport;
  282. uint8_t peripheral;
  283. mutex_lock(&driver->cntl_lock);
  284. for (peripheral = 0; peripheral <= NUM_PERIPHERALS; peripheral++) {
  285. if (!(driver->close_transport & PERIPHERAL_MASK(peripheral)))
  286. continue;
  287. driver->close_transport ^= PERIPHERAL_MASK(peripheral);
  288. #ifdef CONFIG_DIAG_USES_SMD
  289. transport = driver->feature[peripheral].sockets_enabled ?
  290. TRANSPORT_SMD : TRANSPORT_SOCKET;
  291. #else
  292. transport = driver->feature[peripheral].sockets_enabled ?
  293. TRANSPORT_GLINK : TRANSPORT_SOCKET;
  294. #endif
  295. diagfwd_close_transport(transport, peripheral);
  296. }
  297. mutex_unlock(&driver->cntl_lock);
  298. }
/*
 * Flag a peripheral for transport close-out and queue the worker that
 * performs it (diag_close_transport_work_fn).
 *
 * Both the bitmask update and the queue_work happen under cntl_lock,
 * matching the locking used by the worker when it consumes the mask.
 */
static void process_socket_feature(uint8_t peripheral)
{
	if (peripheral >= NUM_PERIPHERALS)
		return;
	mutex_lock(&driver->cntl_lock);
	driver->close_transport |= PERIPHERAL_MASK(peripheral);
	queue_work(driver->cntl_wq, &driver->close_transport_work);
	mutex_unlock(&driver->cntl_lock);
}
  308. static void process_log_on_demand_feature(uint8_t peripheral)
  309. {
  310. /* Log On Demand command is registered only on Modem */
  311. if (peripheral != PERIPHERAL_MODEM)
  312. return;
  313. if (driver->feature[PERIPHERAL_MODEM].log_on_demand)
  314. driver->log_on_demand_support = 1;
  315. else
  316. driver->log_on_demand_support = 0;
  317. }
/*
 * Parse a feature-mask control packet from a peripheral and record
 * which optional diag features it supports, enabling the corresponding
 * apps-side handling for each.
 *
 * @buf:        raw control packet starting at the feature-mask header
 * @len:        payload length in bytes
 * @peripheral: source peripheral index
 */
static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
		uint8_t peripheral)
{
	int i;
	int header_len = sizeof(struct diag_ctrl_feature_mask);
	int read_len = 0;
	struct diag_ctrl_feature_mask *header = NULL;
	uint32_t feature_mask_len = 0;
	uint32_t feature_mask = 0;
	uint8_t *ptr = buf;

	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
		return;

	header = (struct diag_ctrl_feature_mask *)ptr;
	ptr += header_len;
	feature_mask_len = header->feature_mask_len;
	if (feature_mask_len == 0) {
		pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
			__func__, peripheral);
		return;
	}

	/* Clamp to the number of feature-mask bytes the apps side tracks */
	if (feature_mask_len > FEATURE_MASK_LEN) {
		pr_alert("diag: Receiving feature mask length more than Apps support\n");
		feature_mask_len = FEATURE_MASK_LEN;
	}

	/* Stale registrations are dropped before re-learning the features */
	diag_cmd_remove_reg_by_proc(peripheral);

	driver->feature[peripheral].rcvd_feature_mask = 1;

	for (i = 0; i < feature_mask_len && read_len < len; i++) {
		/* One byte of the feature mask per iteration */
		feature_mask = *(uint8_t *)ptr;
		driver->feature[peripheral].feature_mask[i] = feature_mask;
		ptr += sizeof(uint8_t);
		read_len += sizeof(uint8_t);
		/*
		 * FEATURE_SUPPORTED(x) expands to
		 * ((feature_mask << (i * 8)) & (1 << x)) — it implicitly
		 * references both locals `feature_mask` and `i`, so each
		 * test below only matches while `i` is the byte that
		 * contains absolute feature bit x. Do not rename these
		 * locals or hoist the tests out of this loop.
		 */
		if (FEATURE_SUPPORTED(F_DIAG_LOG_ON_DEMAND_APPS))
			driver->feature[peripheral].log_on_demand = 1;
		if (FEATURE_SUPPORTED(F_DIAG_REQ_RSP_SUPPORT))
			driver->feature[peripheral].separate_cmd_rsp = 1;
		if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
			process_hdlc_encoding_feature(peripheral);
		if (FEATURE_SUPPORTED(F_DIAG_PKT_HEADER_UNTAG))
			process_upd_header_untagging_feature(peripheral);
		if (FEATURE_SUPPORTED(F_DIAG_STM))
			enable_stm_feature(peripheral);
		if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
			driver->feature[peripheral].mask_centralization = 1;
		if (FEATURE_SUPPORTED(F_DIAG_PERIPHERAL_BUFFERING))
			driver->feature[peripheral].peripheral_buffering = 1;
		if (FEATURE_SUPPORTED(F_DIAG_SOCKETS_ENABLED))
			enable_socket_feature(peripheral);
		if (FEATURE_SUPPORTED(F_DIAG_DIAGID_SUPPORT))
			driver->feature[peripheral].diag_id_support = 1;
		if (FEATURE_SUPPORTED(F_DIAG_PD_BUFFERING))
			driver->feature[peripheral].pd_buffering = 1;
	}

	process_socket_feature(peripheral);
	process_log_on_demand_feature(peripheral);
}
/*
 * Handle a "last event report" control packet: record the highest event
 * ID a peripheral will generate, growing the shared event mask buffer
 * if that ID needs more bytes than currently allocated.
 *
 * @buf:        raw control packet (header only, fixed size)
 * @len:        must equal the fixed packet length (pkt_len) exactly
 * @peripheral: source peripheral index
 */
static void process_last_event_report(uint8_t *buf, uint32_t len,
		uint8_t peripheral)
{
	struct diag_ctrl_last_event_report *header = NULL;
	uint8_t *ptr = buf;
	uint8_t *temp = NULL;
	uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
	uint16_t event_size = 0;

	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
		return;

	/* event_mask state is shared; all updates happen under its lock */
	mutex_lock(&event_mask.lock);
	header = (struct diag_ctrl_last_event_report *)ptr;
	/* One mask bit per event ID, rounded up to whole bytes */
	event_size = ((header->event_last_id / 8) + 1);
	if (event_size >= driver->event_mask_size) {
		DIAG_LOG(DIAG_DEBUG_MASKS,
			"diag: receiving event mask size more that Apps can handle\n");
		/*
		 * krealloc into a temporary so the old (still valid) buffer
		 * is kept if the allocation fails.
		 */
		temp = krealloc(driver->event_mask->ptr, event_size,
				GFP_KERNEL);
		if (!temp) {
			pr_err("diag: In %s, unable to reallocate event mask to support events from %d\n",
				__func__, peripheral);
			goto err;
		}
		driver->event_mask->ptr = temp;
		driver->event_mask_size = event_size;
	}
	driver->num_event_id[peripheral] = header->event_last_id;
	/* Track the global maximum across all peripherals */
	if (header->event_last_id > driver->last_event_id)
		driver->last_event_id = header->event_last_id;
err:
	mutex_unlock(&event_mask.lock);
}
  405. static void process_log_range_report(uint8_t *buf, uint32_t len,
  406. uint8_t peripheral)
  407. {
  408. int i;
  409. int read_len = 0;
  410. int header_len = sizeof(struct diag_ctrl_log_range_report);
  411. uint8_t *ptr = buf;
  412. struct diag_ctrl_log_range_report *header = NULL;
  413. struct diag_ctrl_log_range *log_range = NULL;
  414. struct diag_log_mask_t *mask_ptr = NULL;
  415. if (!buf || peripheral >= NUM_PERIPHERALS || len < 0)
  416. return;
  417. header = (struct diag_ctrl_log_range_report *)ptr;
  418. ptr += header_len;
  419. /* Don't account for pkt_id and length */
  420. read_len += header_len - (2 * sizeof(uint32_t));
  421. driver->num_equip_id[peripheral] = header->num_ranges;
  422. for (i = 0; i < header->num_ranges && read_len < len; i++) {
  423. log_range = (struct diag_ctrl_log_range *)ptr;
  424. ptr += sizeof(struct diag_ctrl_log_range);
  425. read_len += sizeof(struct diag_ctrl_log_range);
  426. if (log_range->equip_id >= MAX_EQUIP_ID) {
  427. pr_err("diag: receiving log equip id %d more than supported equip id: %d from peripheral: %d\n",
  428. log_range->equip_id, MAX_EQUIP_ID, peripheral);
  429. continue;
  430. }
  431. mask_ptr = (struct diag_log_mask_t *)log_mask.ptr;
  432. mask_ptr = &mask_ptr[log_range->equip_id];
  433. mutex_lock(&(mask_ptr->lock));
  434. mask_ptr->num_items = log_range->num_items;
  435. mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
  436. mutex_unlock(&(mask_ptr->lock));
  437. }
  438. }
  439. static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
  440. struct diag_ssid_range_t *range)
  441. {
  442. uint32_t temp_range;
  443. if (!mask || !range)
  444. return -EIO;
  445. if (range->ssid_last < range->ssid_first) {
  446. pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
  447. __func__, range->ssid_first, range->ssid_last);
  448. return -EINVAL;
  449. }
  450. if (range->ssid_last >= mask->ssid_last) {
  451. temp_range = range->ssid_last - mask->ssid_first + 1;
  452. if (temp_range > MAX_SSID_PER_RANGE) {
  453. temp_range = MAX_SSID_PER_RANGE;
  454. mask->ssid_last = mask->ssid_first + temp_range - 1;
  455. } else
  456. mask->ssid_last = range->ssid_last;
  457. mask->ssid_last_tools = mask->ssid_last;
  458. mask->range = temp_range;
  459. }
  460. return 0;
  461. }
/*
 * Handle an SSID range report from a peripheral: for each advertised
 * SSID range, either extend the matching msg mask table entry or grow
 * the table and create a new entry for it.
 *
 * @buf:        raw control packet starting at the range-report header
 * @len:        advertised payload length; must cover at least the
 *              header minus the generic control-packet header
 * @peripheral: source peripheral index
 */
static void process_ssid_range_report(uint8_t *buf, uint32_t len,
		uint8_t peripheral)
{
	int i;
	int j;
	int read_len = 0;
	int found = 0;
	int new_size = 0;
	int err = 0;
	struct diag_ctrl_ssid_range_report *header = NULL;
	struct diag_ssid_range_t *ssid_range = NULL;
	int header_len = sizeof(struct diag_ctrl_ssid_range_report);
	struct diag_msg_mask_t *mask_ptr = NULL;
	uint8_t *ptr = buf;
	uint8_t *temp = NULL;
	uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);

	if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
		return;

	header = (struct diag_ctrl_ssid_range_report *)ptr;
	ptr += header_len;
	/* Don't account for pkt_id and length */
	read_len += header_len - (2 * sizeof(uint32_t));

	/* msg mask table and counters are guarded by msg_mask_lock */
	mutex_lock(&driver->msg_mask_lock);
	driver->max_ssid_count[peripheral] = header->count;
	for (i = 0; i < header->count && read_len < len; i++) {
		ssid_range = (struct diag_ssid_range_t *)ptr;
		ptr += sizeof(struct diag_ssid_range_t);
		read_len += sizeof(struct diag_ssid_range_t);
		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
		found = 0;
		/* Look for an existing entry with the same ssid_first */
		for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
			/*
			 * An entry without a mask buffer terminates the
			 * usable part of the table; treat as "found" so no
			 * new entry is appended past it.
			 */
			if (!mask_ptr->ptr || !ssid_range) {
				found = 1;
				break;
			}
			if (mask_ptr->ssid_first != ssid_range->ssid_first)
				continue;
			mutex_lock(&mask_ptr->lock);
			err = update_msg_mask_tbl_entry(mask_ptr, ssid_range);
			mutex_unlock(&mask_ptr->lock);
			if (err == -ENOMEM) {
				pr_err("diag: In %s, unable to increase the msg mask table range\n",
					__func__);
			}
			found = 1;
			break;
		}

		if (found)
			continue;

		/* No matching entry: grow the table by one and create it */
		new_size = (driver->msg_mask_tbl_count + 1) *
			sizeof(struct diag_msg_mask_t);
		DIAG_LOG(DIAG_DEBUG_MASKS,
			"diag: receiving msg mask size more that Apps can handle\n");
		/* krealloc via temp keeps the old table on failure */
		temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
		if (!temp) {
			pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
				__func__, ssid_range->ssid_first,
				ssid_range->ssid_last);
			continue;
		}
		msg_mask.ptr = temp;
		/* New entry goes at the current end of the (moved) table */
		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
		err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
		if (err) {
			pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
				__func__, ssid_range->ssid_first,
				ssid_range->ssid_last, err);
			continue;
		}
		driver->msg_mask_tbl_count += 1;
	}
	mutex_unlock(&driver->msg_mask_lock);
}
/*
 * Merge a peripheral-reported build-time msg mask into the apps-side
 * build-time mask table.
 *
 * @buf:   array of uint32_t mask words for the given SSID range
 * @range: SSID range the words in @buf correspond to
 *
 * If an entry with the same ssid_first exists, its range is extended as
 * needed and the incoming words are OR-ed into it; otherwise the table
 * is grown by one entry and a fresh entry is created for the range.
 */
static void diag_build_time_mask_update(uint8_t *buf,
		struct diag_ssid_range_t *range)
{
	int i;
	int j;
	int num_items = 0;
	int err = 0;
	int found = 0;
	int new_size = 0;
	uint8_t *temp = NULL;
	uint32_t *mask_ptr = (uint32_t *)buf;
	uint32_t *dest_ptr = NULL;
	struct diag_msg_mask_t *build_mask = NULL;

	if (!range || !buf)
		return;

	if (range->ssid_last < range->ssid_first) {
		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
			__func__, range->ssid_first, range->ssid_last);
		return;
	}

	/* Build-time mask table is guarded by msg_mask_lock */
	mutex_lock(&driver->msg_mask_lock);
	build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
	num_items = range->ssid_last - range->ssid_first + 1;

	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
		/*
		 * An entry without a mask buffer terminates the usable part
		 * of the table; treat as "found" so no duplicate is added.
		 */
		if (!build_mask->ptr) {
			found = 1;
			break;
		}
		if (build_mask->ssid_first != range->ssid_first)
			continue;
		found = 1;
		mutex_lock(&build_mask->lock);
		err = update_msg_mask_tbl_entry(build_mask, range);
		if (err == -ENOMEM) {
			pr_err("diag: In %s, unable to increase the msg build mask table range\n",
				__func__);
		}
		/* OR the incoming words into the (possibly extended) entry */
		dest_ptr = build_mask->ptr;
		for (j = 0; (j < build_mask->range) && mask_ptr && dest_ptr;
			j++, mask_ptr++, dest_ptr++)
			*(uint32_t *)dest_ptr |= *mask_ptr;
		mutex_unlock(&build_mask->lock);
		break;
	}

	if (found)
		goto end;

	/* No matching entry: grow the table by one and create it */
	new_size = (driver->bt_msg_mask_tbl_count + 1) *
		sizeof(struct diag_msg_mask_t);
	DIAG_LOG(DIAG_DEBUG_MASKS,
		"diag: receiving build time mask size more that Apps can handle\n");
	/* krealloc via temp keeps the old table on failure */
	temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
	if (!temp) {
		pr_err("diag: In %s, unable to create a new entry for build time mask\n",
			__func__);
		goto end;
	}
	driver->build_time_mask->ptr = temp;
	build_mask = (struct diag_msg_mask_t *)driver->build_time_mask->ptr;
	err = diag_create_msg_mask_table_entry(build_mask, range);
	if (err) {
		pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
			__func__, err);
		goto end;
	}
	driver->bt_msg_mask_tbl_count += 1;
end:
	mutex_unlock(&driver->msg_mask_lock);
	return;
}
  604. static void process_build_mask_report(uint8_t *buf, uint32_t len,
  605. uint8_t peripheral)
  606. {
  607. int i;
  608. int read_len = 0;
  609. int num_items = 0;
  610. int header_len = sizeof(struct diag_ctrl_build_mask_report);
  611. uint8_t *ptr = buf;
  612. struct diag_ctrl_build_mask_report *header = NULL;
  613. struct diag_ssid_range_t *range = NULL;
  614. if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
  615. return;
  616. header = (struct diag_ctrl_build_mask_report *)ptr;
  617. ptr += header_len;
  618. /* Don't account for pkt_id and length */
  619. read_len += header_len - (2 * sizeof(uint32_t));
  620. for (i = 0; i < header->count && read_len < len; i++) {
  621. range = (struct diag_ssid_range_t *)ptr;
  622. ptr += sizeof(struct diag_ssid_range_t);
  623. read_len += sizeof(struct diag_ssid_range_t);
  624. num_items = range->ssid_last - range->ssid_first + 1;
  625. diag_build_time_mask_update(ptr, range);
  626. ptr += num_items * sizeof(uint32_t);
  627. read_len += num_items * sizeof(uint32_t);
  628. }
  629. }
  630. int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name,
  631. uint8_t pd_val, uint8_t peripheral)
  632. {
  633. struct diag_id_tbl_t *new_item = NULL;
  634. if (!process_name || diag_id == 0)
  635. return -EINVAL;
  636. new_item = kzalloc(sizeof(struct diag_id_tbl_t), GFP_KERNEL);
  637. if (!new_item)
  638. return -ENOMEM;
  639. kmemleak_not_leak(new_item);
  640. new_item->process_name = kzalloc(strlen(process_name) + 1, GFP_KERNEL);
  641. if (!new_item->process_name) {
  642. kfree(new_item);
  643. new_item = NULL;
  644. return -ENOMEM;
  645. }
  646. kmemleak_not_leak(new_item->process_name);
  647. new_item->diag_id = diag_id;
  648. new_item->pd_val = pd_val;
  649. new_item->peripheral = peripheral;
  650. strlcpy(new_item->process_name, process_name, strlen(process_name) + 1);
  651. INIT_LIST_HEAD(&new_item->link);
  652. mutex_lock(&driver->diag_id_mutex);
  653. list_add_tail(&new_item->link, &driver->diag_id_list);
  654. mutex_unlock(&driver->diag_id_mutex);
  655. return 0;
  656. }
  657. int diag_query_diag_id(char *process_name, uint8_t *diag_id)
  658. {
  659. struct list_head *start;
  660. struct list_head *temp;
  661. struct diag_id_tbl_t *item = NULL;
  662. if (!process_name || !diag_id)
  663. return -EINVAL;
  664. mutex_lock(&driver->diag_id_mutex);
  665. list_for_each_safe(start, temp, &driver->diag_id_list) {
  666. item = list_entry(start, struct diag_id_tbl_t, link);
  667. if (strcmp(item->process_name, process_name) == 0) {
  668. *diag_id = item->diag_id;
  669. mutex_unlock(&driver->diag_id_mutex);
  670. return 1;
  671. }
  672. }
  673. mutex_unlock(&driver->diag_id_mutex);
  674. return 0;
  675. }
/*
 * process_diagid - handle a DIAG_CTRL_MSG_DIAGID request from a peripheral.
 *
 * Looks up the diag_id already registered for the process name carried in
 * the request, or allocates the next id from the file-scope counter for a
 * new registration, records new ids in the per-channel forwarding tables,
 * and echoes the assigned id back to the peripheral on the control channel.
 */
static void process_diagid(uint8_t *buf, uint32_t len,
uint8_t peripheral)
{
struct diag_ctrl_diagid *header = NULL;
struct diag_ctrl_diagid ctrl_pkt;
struct diagfwd_info *fwd_info = NULL;
char *process_name = NULL;
int err = 0;
int pd_val;	/* only assigned on the new-request path below */
char *root_str = NULL;
uint8_t local_diag_id = 0;
uint8_t new_request = 0, i = 0, ch_type = 0;
if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS)
	return;
header = (struct diag_ctrl_diagid *)buf;
process_name = (char *)&header->process_name;
if (diag_query_diag_id(process_name, &local_diag_id))
	ctrl_pkt.diag_id = local_diag_id;
else {
	/* Unknown name: allocate the next id from the global counter */
	diag_id++;
	new_request = 1;
	pd_val = diag_query_pd(process_name);
	if (pd_val < 0)
		return;
	diag_add_diag_id_to_list(diag_id, process_name,
	pd_val, peripheral);
	ctrl_pkt.diag_id = diag_id;
}
root_str = strnstr(process_name, DIAG_ID_ROOT_STRING,
strlen(process_name));
if (new_request) {
	for (ch_type = 0; ch_type < NUM_TYPES; ch_type++) {
		/* DCI channels do not carry per-PD diag ids */
		if (ch_type == TYPE_DCI ||
		ch_type == TYPE_DCI_CMD)
			continue;
		fwd_info = &peripheral_info[ch_type][peripheral];
		fwd_info->num_pd++;
		if (root_str) {
			fwd_info->root_diag_id.diagid_val =
			ctrl_pkt.diag_id;
			fwd_info->root_diag_id.reg_str =
			process_name;
			fwd_info->root_diag_id.pd = pd_val;
		} else {
			i = fwd_info->num_pd - 2;
			/*
			 * NOTE(review): i is uint8_t, so "i >= 0" is
			 * always true; if num_pd were 0 or 1 the
			 * subtraction wraps and only the upper-bound
			 * check below guards the array access.
			 */
			if (i >= 0 && i < MAX_PERIPHERAL_UPD) {
				fwd_info->upd_diag_id[i].diagid_val =
				ctrl_pkt.diag_id;
				fwd_info->upd_diag_id[i].reg_str =
				process_name;
				fwd_info->upd_diag_id[i].pd = pd_val;
			}
		}
	}
}
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: peripheral = %d: diag_id string = %s,diag_id = %d\n",
peripheral, process_name, ctrl_pkt.diag_id);
ctrl_pkt.pkt_id = DIAG_CTRL_MSG_DIAGID;
ctrl_pkt.version = 1;
/*
 * NOTE(review): assumes the peripheral-supplied process_name fits in
 * ctrl_pkt.process_name -- confirm the field size in the struct
 * definition.
 */
strlcpy((char *)&ctrl_pkt.process_name, process_name,
strlen(process_name) + 1);
ctrl_pkt.len = sizeof(ctrl_pkt.diag_id) + sizeof(ctrl_pkt.version) +
strlen(process_name) + 1;
err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, ctrl_pkt.len +
sizeof(ctrl_pkt.pkt_id) + sizeof(ctrl_pkt.len));
if (err && err != -ENODEV) {
	pr_err("diag: Unable to send diag id ctrl packet to peripheral %d, err: %d\n",
	peripheral, err);
} else {
	/*
	 * Masks (F3, logs and events) will be sent to
	 * peripheral immediately following feature mask update only
	 * if diag_id support is not present or
	 * diag_id support is present and diag_id has been sent to
	 * peripheral.
	 * With diag_id being sent now, mask will be updated
	 * to peripherals.
	 */
	if (root_str) {
		driver->diag_id_sent[peripheral] = 1;
		queue_work(driver->cntl_wq, &driver->mask_update_work);
	}
	fwd_info = &peripheral_info[TYPE_DATA][peripheral];
	diagfwd_buffers_init(fwd_info);
	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
	"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s :\n",
	driver->diag_id_sent[peripheral], peripheral,
	ctrl_pkt.diag_id, process_name);
}
}
  767. void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
  768. int len)
  769. {
  770. uint32_t read_len = 0;
  771. uint32_t header_len = sizeof(struct diag_ctrl_pkt_header_t);
  772. uint8_t *ptr = buf;
  773. struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
  774. if (!buf || len <= 0 || !p_info)
  775. return;
  776. if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
  777. pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
  778. p_info->peripheral);
  779. return;
  780. }
  781. while (read_len + header_len < len) {
  782. ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
  783. switch (ctrl_pkt->pkt_id) {
  784. case DIAG_CTRL_MSG_REG:
  785. process_command_registration(ptr, ctrl_pkt->len,
  786. p_info->peripheral);
  787. break;
  788. case DIAG_CTRL_MSG_DEREG:
  789. process_command_deregistration(ptr, ctrl_pkt->len,
  790. p_info->peripheral);
  791. break;
  792. case DIAG_CTRL_MSG_FEATURE:
  793. process_incoming_feature_mask(ptr, ctrl_pkt->len,
  794. p_info->peripheral);
  795. break;
  796. case DIAG_CTRL_MSG_LAST_EVENT_REPORT:
  797. process_last_event_report(ptr, ctrl_pkt->len,
  798. p_info->peripheral);
  799. break;
  800. case DIAG_CTRL_MSG_LOG_RANGE_REPORT:
  801. process_log_range_report(ptr, ctrl_pkt->len,
  802. p_info->peripheral);
  803. break;
  804. case DIAG_CTRL_MSG_SSID_RANGE_REPORT:
  805. process_ssid_range_report(ptr, ctrl_pkt->len,
  806. p_info->peripheral);
  807. break;
  808. case DIAG_CTRL_MSG_BUILD_MASK_REPORT:
  809. process_build_mask_report(ptr, ctrl_pkt->len,
  810. p_info->peripheral);
  811. break;
  812. case DIAG_CTRL_MSG_PD_STATUS:
  813. process_pd_status(ptr, ctrl_pkt->len,
  814. p_info->peripheral);
  815. break;
  816. case DIAG_CTRL_MSG_DIAGID:
  817. process_diagid(ptr, ctrl_pkt->len,
  818. p_info->peripheral);
  819. break;
  820. default:
  821. pr_debug("diag: Control packet %d not supported\n",
  822. ctrl_pkt->pkt_id);
  823. }
  824. ptr += header_len + ctrl_pkt->len;
  825. read_len += header_len + ctrl_pkt->len;
  826. }
  827. }
  828. static int diag_compute_real_time(int idx)
  829. {
  830. int real_time = MODE_REALTIME;
  831. if (driver->proc_active_mask == 0) {
  832. /*
  833. * There are no DCI or Memory Device processes. Diag should
  834. * be in Real Time mode irrespective of USB connection
  835. */
  836. real_time = MODE_REALTIME;
  837. } else if (driver->proc_rt_vote_mask[idx] & driver->proc_active_mask) {
  838. /*
  839. * Atleast one process is alive and is voting for Real Time
  840. * data - Diag should be in real time mode irrespective of USB
  841. * connection.
  842. */
  843. real_time = MODE_REALTIME;
  844. } else if (driver->usb_connected) {
  845. /*
  846. * If USB is connected, check individual process. If Memory
  847. * Device Mode is active, set the mode requested by Memory
  848. * Device process. Set to realtime mode otherwise.
  849. */
  850. if ((driver->proc_rt_vote_mask[idx] &
  851. DIAG_PROC_MEMORY_DEVICE) == 0)
  852. real_time = MODE_NONREALTIME;
  853. else
  854. real_time = MODE_REALTIME;
  855. } else {
  856. /*
  857. * We come here if USB is not connected and the active
  858. * processes are voting for Non realtime mode.
  859. */
  860. real_time = MODE_NONREALTIME;
  861. }
  862. return real_time;
  863. }
  864. static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
  865. uint8_t diag_id, int real_time)
  866. {
  867. struct diag_ctrl_msg_diagmode diagmode;
  868. struct diag_ctrl_msg_diagmode_v2 diagmode_v2;
  869. int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
  870. int msg_size_2 = sizeof(struct diag_ctrl_msg_diagmode_v2);
  871. if (!dest_buf)
  872. return;
  873. if (diag_id) {
  874. diagmode_v2.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
  875. diagmode_v2.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN_V2;
  876. diagmode_v2.version = 2;
  877. diagmode_v2.sleep_vote = real_time ? 1 : 0;
  878. /*
  879. * 0 - Disables real-time logging (to prevent
  880. * frequent APPS wake-ups, etc.).
  881. * 1 - Enable real-time logging
  882. */
  883. diagmode_v2.real_time = real_time;
  884. diagmode_v2.use_nrt_values = 0;
  885. diagmode_v2.commit_threshold = 0;
  886. diagmode_v2.sleep_threshold = 0;
  887. diagmode_v2.sleep_time = 0;
  888. diagmode_v2.drain_timer_val = 0;
  889. diagmode_v2.event_stale_timer_val = 0;
  890. diagmode_v2.diag_id = diag_id;
  891. memcpy(dest_buf, &diagmode_v2, msg_size_2);
  892. } else {
  893. diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
  894. diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
  895. diagmode.version = 1;
  896. diagmode.sleep_vote = real_time ? 1 : 0;
  897. /*
  898. * 0 - Disables real-time logging (to prevent
  899. * frequent APPS wake-ups, etc.).
  900. * 1 - Enable real-time logging
  901. */
  902. diagmode.real_time = real_time;
  903. diagmode.use_nrt_values = 0;
  904. diagmode.commit_threshold = 0;
  905. diagmode.sleep_threshold = 0;
  906. diagmode.sleep_time = 0;
  907. diagmode.drain_timer_val = 0;
  908. diagmode.event_stale_timer_val = 0;
  909. memcpy(dest_buf, &diagmode, msg_size);
  910. }
  911. }
  912. void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
  913. {
  914. int i;
  915. mutex_lock(&driver->real_time_mutex);
  916. if (vote)
  917. driver->proc_active_mask |= proc;
  918. else {
  919. driver->proc_active_mask &= ~proc;
  920. if (index == ALL_PROC) {
  921. for (i = 0; i < DIAG_NUM_PROC; i++)
  922. driver->proc_rt_vote_mask[i] |= proc;
  923. } else {
  924. driver->proc_rt_vote_mask[index] |= proc;
  925. }
  926. }
  927. mutex_unlock(&driver->real_time_mutex);
  928. }
  929. void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index)
  930. {
  931. int i;
  932. if (index >= DIAG_NUM_PROC) {
  933. pr_err("diag: In %s, invalid index %d\n", __func__, index);
  934. return;
  935. }
  936. mutex_lock(&driver->real_time_mutex);
  937. if (index == ALL_PROC) {
  938. for (i = 0; i < DIAG_NUM_PROC; i++) {
  939. if (real_time)
  940. driver->proc_rt_vote_mask[i] |= proc;
  941. else
  942. driver->proc_rt_vote_mask[i] &= ~proc;
  943. }
  944. } else {
  945. if (real_time)
  946. driver->proc_rt_vote_mask[index] |= proc;
  947. else
  948. driver->proc_rt_vote_mask[index] &= ~proc;
  949. }
  950. mutex_unlock(&driver->real_time_mutex);
  951. }
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_send_diag_mode_update_remote - send a DIAGMODE control packet to a
 * remote (bridged) DCI processor.
 * @token:     remote DCI processor index, 0 <= token < NUM_DCI_PROC
 * @real_time: MODE_REALTIME or MODE_NONREALTIME
 *
 * Frames a DCI header + version-1 diagmode packet + terminator into a
 * buffer borrowed from the bridge mempool and writes it out. On a write
 * failure the buffer is returned to the mempool here; on success the
 * bridge layer owns it, and the cached mode for the remote proc is
 * updated.
 */
static void diag_send_diag_mode_update_remote(int token, int real_time)
{
unsigned char *buf = NULL;
int err = 0;
struct diag_dci_header_t dci_header;
int dci_header_size = sizeof(struct diag_dci_header_t);
int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
uint32_t write_len = 0;
if (token < 0 || token >= NUM_DCI_PROC) {
	pr_err("diag: Invalid remote device channel in %s, token: %d\n",
	__func__, token);
	return;
}
if (real_time != MODE_REALTIME && real_time != MODE_NONREALTIME) {
	pr_err("diag: Invalid real time value in %s, type: %d\n",
	__func__, real_time);
	return;
}
buf = dci_get_buffer_from_bridge(token);
if (!buf) {
	pr_err("diag: In %s, unable to get dci buffers to write data\n",
	__func__);
	return;
}
/* Frame the DCI header */
dci_header.start = CONTROL_CHAR;
dci_header.version = 1;
dci_header.length = msg_size + 1;	/* payload + end terminator */
dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
memcpy(buf + write_len, &dci_header, dci_header_size);
write_len += dci_header_size;
/* diag_id 0 selects the version-1 diagmode packet layout */
diag_create_diag_mode_ctrl_pkt(buf + write_len, 0, real_time);
write_len += msg_size;
*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
write_len += sizeof(uint8_t);
err = diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, write_len);
if (err != write_len) {
	/* Write failed: give the buffer back to its mempool */
	pr_err("diag: cannot send nrt mode ctrl pkt, err: %d\n", err);
	diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
} else {
	/* real_time_mode[0] is the local proc; remotes start at 1 */
	driver->real_time_mode[token + 1] = real_time;
}
}
#else
/* Stub when bridge support is compiled out. */
static inline void diag_send_diag_mode_update_remote(int token, int real_time)
{
}
#endif
#ifdef CONFIG_DIAG_OVER_USB
/*
 * diag_real_time_work_fn - workqueue handler that recomputes the desired
 * real-time mode for each processor and pushes updates to local
 * peripherals and remote processors as needed.
 */
void diag_real_time_work_fn(struct work_struct *work)
{
int temp_real_time = MODE_REALTIME, i, j;
uint8_t send_update = 1;
/*
 * If any peripheral in the local processor is in either threshold or
 * circular buffering mode, don't send the real time mode control
 * packet.
 */
for (i = 0; i < NUM_PERIPHERALS; i++) {
	if (!driver->feature[i].peripheral_buffering)
		continue;
	switch (driver->buffering_mode[i].mode) {
	case DIAG_BUFFERING_MODE_THRESHOLD:
	case DIAG_BUFFERING_MODE_CIRCULAR:
		/* break leaves only the switch; send_update stays 0 */
		send_update = 0;
		break;
	}
}
mutex_lock(&driver->mode_lock);
for (i = 0; i < DIAG_NUM_PROC; i++) {
	temp_real_time = diag_compute_real_time(i);
	if (temp_real_time == driver->real_time_mode[i]) {
		pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
		i, temp_real_time);
		continue;
	}
	if (i == DIAG_LOCAL_PROC) {
		if (!send_update) {
			/* this break exits the processor loop entirely */
			pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
			__func__);
			break;
		}
		for (j = 0; j < NUM_PERIPHERALS; j++)
			diag_send_real_time_update(j,
			temp_real_time);
	} else {
		/* remote processors are indexed from 0 in the bridge API */
		diag_send_diag_mode_update_remote(i - 1,
		temp_real_time);
	}
}
mutex_unlock(&driver->mode_lock);
if (driver->real_time_update_busy > 0)
	driver->real_time_update_busy--;
}
#else
/*
 * Non-USB build: same recomputation, but without the peripheral
 * buffering-mode guard (buffering only matters with USB transport).
 */
void diag_real_time_work_fn(struct work_struct *work)
{
int temp_real_time = MODE_REALTIME, i, j;
for (i = 0; i < DIAG_NUM_PROC; i++) {
	if (driver->proc_active_mask == 0) {
		/*
		 * There are no DCI or Memory Device processes.
		 * Diag should be in Real Time mode.
		 */
		temp_real_time = MODE_REALTIME;
	} else if (!(driver->proc_rt_vote_mask[i] &
	driver->proc_active_mask)) {
		/* No active process is voting for real time mode */
		temp_real_time = MODE_NONREALTIME;
	}
	if (temp_real_time == driver->real_time_mode[i]) {
		pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
		i, temp_real_time);
		continue;
	}
	if (i == DIAG_LOCAL_PROC) {
		for (j = 0; j < NUM_PERIPHERALS; j++)
			diag_send_real_time_update(
			j, temp_real_time);
	} else {
		diag_send_diag_mode_update_remote(i - 1,
		temp_real_time);
	}
}
if (driver->real_time_update_busy > 0)
	driver->real_time_update_busy--;
}
#endif
/*
 * __diag_send_real_time_update - send a DIAGMODE packet to one peripheral.
 * @peripheral: target peripheral index
 * @real_time:  MODE_REALTIME or MODE_NONREALTIME
 * @diag_id:    non-zero selects the version-2 (per-diag_id) packet
 *
 * Returns 0 on success or when the control channel is simply not open
 * yet, -EINVAL on bad arguments, otherwise the diagfwd_write() error.
 */
static int __diag_send_real_time_update(uint8_t peripheral, int real_time,
uint8_t diag_id)
{
/* buf is sized for the larger v2 packet; v1 uses a prefix of it */
char buf[sizeof(struct diag_ctrl_msg_diagmode_v2)];
int msg_size = 0;
int err = 0;
if (peripheral >= NUM_PERIPHERALS) {
	pr_err("diag: In %s, invalid peripheral %d\n", __func__,
	peripheral);
	return -EINVAL;
}
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
	/* Channel not up yet: not an error, caller may retry later */
	pr_debug("diag: In %s, control channel is not open, p: %d\n",
	__func__, peripheral);
	return err;
}
if (real_time != MODE_NONREALTIME && real_time != MODE_REALTIME) {
	pr_err("diag: In %s, invalid real time mode %d, peripheral: %d\n",
	__func__, real_time, peripheral);
	return -EINVAL;
}
msg_size = (diag_id ? sizeof(struct diag_ctrl_msg_diagmode_v2) :
sizeof(struct diag_ctrl_msg_diagmode));
diag_create_diag_mode_ctrl_pkt(buf, diag_id, real_time);
mutex_lock(&driver->diag_cntl_mutex);
err = diagfwd_write(peripheral, TYPE_CNTL, buf, msg_size);
if (err && err != -ENODEV) {
	pr_err("diag: In %s, unable to write, peripheral: %d, type: %d, len: %d, err: %d\n",
	__func__, peripheral, TYPE_CNTL,
	msg_size, err);
} else {
	/*
	 * Cache the new mode for the local proc; -ENODEV (channel
	 * gone) is treated the same as success here.
	 */
	driver->real_time_mode[DIAG_LOCAL_PROC] = real_time;
}
mutex_unlock(&driver->diag_cntl_mutex);
return err;
}
  1118. int diag_send_real_time_update(uint8_t peripheral, int real_time)
  1119. {
  1120. int i;
  1121. for (i = 0; i < NUM_PERIPHERALS; i++) {
  1122. if (!driver->buffering_flag[i])
  1123. continue;
  1124. /*
  1125. * One of the peripherals is in buffering mode. Don't set
  1126. * the RT value.
  1127. */
  1128. return -EINVAL;
  1129. }
  1130. return __diag_send_real_time_update(peripheral, real_time, 0);
  1131. }
  1132. void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral)
  1133. {
  1134. if (!diag_search_diagid_by_pd(pd, (void *)diag_id,
  1135. (void *)peripheral)) {
  1136. *diag_id = 0;
  1137. if ((pd >= 0) && pd < NUM_PERIPHERALS)
  1138. *peripheral = pd;
  1139. else
  1140. *peripheral = -EINVAL;
  1141. }
  1142. if (*peripheral >= 0)
  1143. if (!driver->feature[*peripheral].pd_buffering)
  1144. *diag_id = 0;
  1145. }
  1146. int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
  1147. {
  1148. int err = 0;
  1149. int mode = MODE_REALTIME;
  1150. int peripheral = 0;
  1151. uint8_t diag_id = 0;
  1152. if (!params)
  1153. return -EIO;
  1154. diag_map_pd_to_diagid(params->peripheral,
  1155. &diag_id, &peripheral);
  1156. if ((peripheral < 0) ||
  1157. peripheral >= NUM_PERIPHERALS) {
  1158. pr_err("diag: In %s, invalid peripheral %d\n", __func__,
  1159. peripheral);
  1160. return -EINVAL;
  1161. }
  1162. if (!driver->buffering_flag[params->peripheral]) {
  1163. pr_err("diag: In %s, buffering flag not set for %d\n", __func__,
  1164. params->peripheral);
  1165. return -EINVAL;
  1166. }
  1167. if (!driver->feature[peripheral].peripheral_buffering) {
  1168. pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
  1169. __func__, peripheral);
  1170. return -EIO;
  1171. }
  1172. switch (params->mode) {
  1173. case DIAG_BUFFERING_MODE_STREAMING:
  1174. mode = MODE_REALTIME;
  1175. break;
  1176. case DIAG_BUFFERING_MODE_THRESHOLD:
  1177. case DIAG_BUFFERING_MODE_CIRCULAR:
  1178. mode = MODE_NONREALTIME;
  1179. break;
  1180. default:
  1181. pr_err("diag: In %s, invalid tx mode %d\n", __func__,
  1182. params->mode);
  1183. return -EINVAL;
  1184. }
  1185. if (!driver->feature[peripheral].peripheral_buffering) {
  1186. pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
  1187. __func__, peripheral);
  1188. driver->buffering_flag[params->peripheral] = 0;
  1189. return -EIO;
  1190. }
  1191. /*
  1192. * Perform sanity on watermark values. These values must be
  1193. * checked irrespective of the buffering mode.
  1194. */
  1195. if (((params->high_wm_val > DIAG_MAX_WM_VAL) ||
  1196. (params->low_wm_val > DIAG_MAX_WM_VAL)) ||
  1197. (params->low_wm_val > params->high_wm_val) ||
  1198. ((params->low_wm_val == params->high_wm_val) &&
  1199. (params->low_wm_val != DIAG_MIN_WM_VAL))) {
  1200. pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
  1201. __func__, params->high_wm_val, params->low_wm_val,
  1202. params->peripheral);
  1203. return -EINVAL;
  1204. }
  1205. mutex_lock(&driver->mode_lock);
  1206. err = diag_send_buffering_tx_mode_pkt(peripheral, diag_id, params);
  1207. if (err) {
  1208. pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
  1209. __func__, peripheral, err);
  1210. goto fail;
  1211. }
  1212. err = diag_send_buffering_wm_values(peripheral, diag_id, params);
  1213. if (err) {
  1214. pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
  1215. __func__, peripheral, err);
  1216. goto fail;
  1217. }
  1218. err = __diag_send_real_time_update(peripheral, mode, diag_id);
  1219. if (err) {
  1220. pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
  1221. __func__, peripheral, mode, err);
  1222. goto fail;
  1223. }
  1224. driver->buffering_mode[params->peripheral].peripheral =
  1225. params->peripheral;
  1226. driver->buffering_mode[params->peripheral].mode =
  1227. params->mode;
  1228. driver->buffering_mode[params->peripheral].low_wm_val =
  1229. params->low_wm_val;
  1230. driver->buffering_mode[params->peripheral].high_wm_val =
  1231. params->high_wm_val;
  1232. if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
  1233. driver->buffering_flag[params->peripheral] = 0;
  1234. fail:
  1235. mutex_unlock(&driver->mode_lock);
  1236. return err;
  1237. }
/*
 * diag_send_stm_state - send an STM control packet to a peripheral.
 * @peripheral:       target peripheral index
 * @stm_control_data: raw STM control value forwarded in the packet
 *
 * Returns 0 on success, -EIO/-ENODEV/-EINVAL on validation failure, or
 * the diagfwd_write() error (except -ENODEV, which is not logged).
 */
int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
{
struct diag_ctrl_msg_stm stm_msg;
int msg_size = sizeof(struct diag_ctrl_msg_stm);
int err = 0;
if (peripheral >= NUM_PERIPHERALS)
	return -EIO;
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
	pr_debug("diag: In %s, control channel is not open, p: %d\n",
	__func__, peripheral);
	return -ENODEV;
}
if (driver->feature[peripheral].stm_support == DISABLE_STM)
	return -EINVAL;
/*
 * NOTE(review): 21 is presumably the STM control packet id and 5 the
 * payload length (version + control_data) from the diag protocol
 * spec -- confirm against the protocol header and consider named
 * constants.
 */
stm_msg.ctrl_pkt_id = 21;
stm_msg.ctrl_pkt_data_len = 5;
stm_msg.version = 1;
stm_msg.control_data = stm_control_data;
err = diagfwd_write(peripheral, TYPE_CNTL, &stm_msg, msg_size);
if (err && err != -ENODEV) {
	pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
	__func__, peripheral, TYPE_CNTL,
	msg_size, err);
}
return err;
}
  1265. int diag_send_peripheral_drain_immediate(uint8_t pd,
  1266. uint8_t diag_id, int peripheral)
  1267. {
  1268. int err = 0;
  1269. struct diag_ctrl_drain_immediate ctrl_pkt;
  1270. struct diag_ctrl_drain_immediate_v2 ctrl_pkt_v2;
  1271. if (!driver->feature[peripheral].peripheral_buffering) {
  1272. pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
  1273. __func__, peripheral);
  1274. return -EINVAL;
  1275. }
  1276. if (!driver->diagfwd_cntl[peripheral] ||
  1277. !driver->diagfwd_cntl[peripheral]->ch_open) {
  1278. pr_debug("diag: In %s, control channel is not open, p: %d\n",
  1279. __func__, peripheral);
  1280. return -ENODEV;
  1281. }
  1282. if (diag_id && driver->feature[peripheral].pd_buffering) {
  1283. ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
  1284. /*
  1285. * The length of the ctrl pkt is size of version,
  1286. * diag_id and stream id
  1287. */
  1288. ctrl_pkt_v2.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
  1289. ctrl_pkt_v2.version = 2;
  1290. ctrl_pkt_v2.diag_id = diag_id;
  1291. ctrl_pkt_v2.stream_id = 1;
  1292. err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
  1293. sizeof(ctrl_pkt_v2));
  1294. if (err && err != -ENODEV) {
  1295. pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
  1296. peripheral, err);
  1297. }
  1298. } else {
  1299. ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
  1300. /*
  1301. * The length of the ctrl pkt is
  1302. * size of version and stream id
  1303. */
  1304. ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
  1305. ctrl_pkt.version = 1;
  1306. ctrl_pkt.stream_id = 1;
  1307. err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
  1308. sizeof(ctrl_pkt));
  1309. if (err && err != -ENODEV) {
  1310. pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
  1311. peripheral, err);
  1312. }
  1313. }
  1314. return err;
  1315. }
/*
 * diag_send_buffering_tx_mode_pkt - send the tx-mode control packet that
 * selects streaming/threshold/circular buffering for a peripheral or PD.
 * @peripheral: target peripheral index
 * @diag_id:    non-zero + pd_buffering support selects the version-2
 *              (per-diag_id) packet
 * @params:     requested mode; params->peripheral indexes the cached
 *              buffering_mode table (may differ from @peripheral for PDs)
 *
 * Returns 0 on success, -EIO/-EINVAL on validation failure, or the
 * diagfwd_write() error.
 *
 * NOTE(review): unlike diag_send_buffering_wm_values(), there is no
 * ch_open check here -- confirm whether diagfwd_write() tolerates a
 * closed control channel on this path.
 */
int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
uint8_t diag_id, struct diag_buffering_mode_t *params)
{
int err = 0;
struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
struct diag_ctrl_peripheral_tx_mode_v2 ctrl_pkt_v2;
if (!params)
	return -EIO;
if (peripheral >= NUM_PERIPHERALS) {
	pr_err("diag: In %s, invalid peripheral %d\n", __func__,
	peripheral);
	return -EINVAL;
}
if (!driver->feature[peripheral].peripheral_buffering) {
	pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
	__func__, peripheral);
	return -EINVAL;
}
switch (params->mode) {
case DIAG_BUFFERING_MODE_STREAMING:
case DIAG_BUFFERING_MODE_THRESHOLD:
case DIAG_BUFFERING_MODE_CIRCULAR:
	break;
default:
	pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
	params->mode);
	return -EINVAL;
}
if (diag_id &&
driver->feature[peripheral].pd_buffering) {
	ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
	/*
	 * Control packet length is size of version, diag_id,
	 * stream_id and tx_mode
	 */
	ctrl_pkt_v2.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
	ctrl_pkt_v2.version = 2;
	ctrl_pkt_v2.diag_id = diag_id;
	ctrl_pkt_v2.stream_id = 1;
	ctrl_pkt_v2.tx_mode = params->mode;
	err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
	sizeof(ctrl_pkt_v2));
	if (err && err != -ENODEV) {
		pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
		peripheral, err);
		goto fail;
	}
} else {
	ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
	/*
	 * Control packet length is size of version,
	 * stream_id and tx_mode
	 */
	ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
	ctrl_pkt.version = 1;
	ctrl_pkt.stream_id = 1;
	ctrl_pkt.tx_mode = params->mode;
	err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
	sizeof(ctrl_pkt));
	if (err && err != -ENODEV) {
		pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
		peripheral, err);
		goto fail;
	}
}
/* Cache the accepted mode; skipped (via goto) on send failure */
driver->buffering_mode[params->peripheral].mode = params->mode;
fail:
return err;
}
/*
 * diag_send_buffering_wm_values - send the high/low watermark control
 * packet for a peripheral's (or PD's) buffering configuration.
 * @peripheral: target peripheral index
 * @diag_id:    non-zero + pd_buffering support selects the version-2
 *              (per-diag_id) packet
 * @params:     watermark values and mode (mode is only validated here)
 *
 * Returns 0 on success, -EIO/-EINVAL/-ENODEV on validation failure, or
 * the diagfwd_write() error.
 */
int diag_send_buffering_wm_values(uint8_t peripheral,
uint8_t diag_id, struct diag_buffering_mode_t *params)
{
int err = 0;
struct diag_ctrl_set_wq_val ctrl_pkt;
struct diag_ctrl_set_wq_val_v2 ctrl_pkt_v2;
if (!params)
	return -EIO;
if (peripheral >= NUM_PERIPHERALS) {
	pr_err("diag: In %s, invalid peripheral %d\n", __func__,
	peripheral);
	return -EINVAL;
}
if (!driver->feature[peripheral].peripheral_buffering) {
	pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
	__func__, peripheral);
	return -EINVAL;
}
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
	pr_debug("diag: In %s, control channel is not open, p: %d\n",
	__func__, peripheral);
	return -ENODEV;
}
switch (params->mode) {
case DIAG_BUFFERING_MODE_STREAMING:
case DIAG_BUFFERING_MODE_THRESHOLD:
case DIAG_BUFFERING_MODE_CIRCULAR:
	break;
default:
	pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
	params->mode);
	return -EINVAL;
}
if (diag_id &&
driver->feature[peripheral].pd_buffering) {
	ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
	/*
	 * Control packet length is size of version, diag_id,
	 * stream_id and wmq values
	 */
	ctrl_pkt_v2.len = sizeof(uint32_t) + (4 * sizeof(uint8_t));
	ctrl_pkt_v2.version = 2;
	ctrl_pkt_v2.diag_id = diag_id;
	ctrl_pkt_v2.stream_id = 1;
	ctrl_pkt_v2.high_wm_val = params->high_wm_val;
	ctrl_pkt_v2.low_wm_val = params->low_wm_val;
	err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
	sizeof(ctrl_pkt_v2));
	if (err && err != -ENODEV) {
		pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
		peripheral, err);
	}
} else {
	ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
	/*
	 * Control packet length is size of version,
	 * stream_id and wmq values
	 */
	ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
	ctrl_pkt.version = 1;
	ctrl_pkt.stream_id = 1;
	ctrl_pkt.high_wm_val = params->high_wm_val;
	ctrl_pkt.low_wm_val = params->low_wm_val;
	err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
	sizeof(ctrl_pkt));
	if (err && err != -ENODEV) {
		pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
		peripheral, err);
	}
}
return err;
}
  1458. int diagfwd_cntl_init(void)
  1459. {
  1460. uint8_t peripheral = 0;
  1461. reg_dirty = 0;
  1462. driver->polling_reg_flag = 0;
  1463. driver->log_on_demand_support = 1;
  1464. driver->stm_peripheral = 0;
  1465. driver->close_transport = 0;
  1466. for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
  1467. driver->buffering_flag[peripheral] = 0;
  1468. mutex_init(&driver->cntl_lock);
  1469. INIT_WORK(&(driver->stm_update_work), diag_stm_update_work_fn);
  1470. INIT_WORK(&(driver->mask_update_work), diag_mask_update_work_fn);
  1471. INIT_WORK(&(driver->close_transport_work),
  1472. diag_close_transport_work_fn);
  1473. driver->cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
  1474. if (!driver->cntl_wq)
  1475. return -ENOMEM;
  1476. return 0;
  1477. }
  1478. void diagfwd_cntl_channel_init(void)
  1479. {
  1480. uint8_t peripheral;
  1481. for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
  1482. diagfwd_early_open(peripheral);
  1483. diagfwd_open(peripheral, TYPE_CNTL);
  1484. }
  1485. }
/*
 * diagfwd_cntl_exit - tear down the control workqueue created by
 * diagfwd_cntl_init(); safe to call when init failed (cntl_wq NULL).
 */
void diagfwd_cntl_exit(void)
{
	if (driver->cntl_wq)
		destroy_workqueue(driver->cntl_wq);
}