rdbg.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175
  1. /*
  2. * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. *
  13. */
  14. #include <linux/cdev.h>
  15. #include <linux/device.h>
  16. #include <linux/fs.h>
  17. #include <linux/slab.h>
  18. #include <linux/module.h>
  19. #include <linux/of_gpio.h>
  20. #include <soc/qcom/smem.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/interrupt.h>
/* Number of SMP2P processor slots; bounds proc_info[] and device minors. */
#define SMP2P_NUM_PROCS 16
/* Max attempts rdbg_write() makes before failing the send with -ECOMM. */
#define MAX_RETRIES 20
/* Shared-memory queue protocol version written into struct smq_hdr. */
#define SM_VERSION 1
/* Fixed payload block size (bytes) for header version 1. */
#define SM_BLOCKSIZE 128

/* Magic values marking initialized queue structures in shared memory. */
#define SMQ_MAGIC_INIT 0xFF00FF00
#define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1)
#define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2)

/* Internal SMQ status codes (negative on failure). */
enum SMQ_STATUS {
SMQ_SUCCESS = 0,
SMQ_ENOMEMORY = -1,
SMQ_EBADPARM = -2,
SMQ_UNDERFLOW = -3,
SMQ_OVERFLOW = -4
};

/* Role of a struct smq instance for one direction of the channel. */
enum smq_type {
PRODUCER = 1,
CONSUMER = 2,
INVALID = 3
};
/*
 * Producer-private allocator state tracking which data blocks are in use.
 * map[i] == 0 means block i is free; a non-zero value is the remaining
 * length of the allocated run starting at i (see smq_blockmap_get()).
 */
struct smq_block_map {
uint32_t index_read;   /* rotating scan hint for the next allocation */
uint32_t num_blocks;   /* number of usable entries in map[] */
uint8_t *map;          /* one byte per block, kcalloc'd */
};

/* One control-channel entry: which blocks hold a message. Lives in SMEM. */
struct smq_node {
uint16_t index_block;  /* index of the first data block of the message */
uint16_t num_blocks;   /* number of consecutive blocks the message uses */
} __attribute__ ((__packed__));

/* Version header at the start of each shared-memory half. */
struct smq_hdr {
uint8_t producer_version;
uint8_t consumer_version;
} __attribute__ ((__packed__));
/* Producer-owned control words (R/W by producer, R by consumer). In SMEM. */
struct smq_out_state {
uint32_t init;                         /* SMQ_MAGIC_PRODUCER when ready */
uint32_t index_check_queue_for_reset;  /* reset generation counter */
uint32_t index_sent_write;             /* next sent[] slot to write */
uint32_t index_free_read;              /* next free[] slot to consume */
} __attribute__ ((__packed__));

/* Producer control block followed by the sent-node ring (length is
 * num_blocks at runtime; [1] is the pre-C99 variable-length idiom). */
struct smq_out {
struct smq_out_state s;
struct smq_node sent[1];
};

/* Consumer-owned control words (R/W by consumer, R by producer). In SMEM. */
struct smq_in_state {
uint32_t init;                             /* SMQ_MAGIC_CONSUMER when ready */
uint32_t index_check_queue_for_reset_ack;  /* acks the producer's counter */
uint32_t index_sent_read;                  /* next sent[] slot to read */
uint32_t index_free_write;                 /* next free[] slot to write */
} __attribute__ ((__packed__));

/* Consumer control block followed by the free-node ring. */
struct smq_in {
struct smq_in_state s;
struct smq_node free[1];
};

/*
 * Local (kernel-memory) view of one direction of the shared-memory queue.
 * hdr/out/in/blocks all point into the SMEM region laid out by smq_ctor().
 */
struct smq {
struct smq_hdr *hdr;
struct smq_out *out;
struct smq_in *in;
uint8_t *blocks;            /* start of the data-block array in SMEM */
uint32_t num_blocks;
struct mutex *lock;         /* only set for PRODUCER queues */
uint32_t initialized;       /* SMQ_MAGIC_INIT once smq_ctor() succeeds */
struct smq_block_map block_map;  /* producer-only allocator */
enum smq_type type;
};
/* SMP2P GPIO pair used to signal/receive interrupts with the subsystem. */
struct gpio_info {
int gpio_base_id;
int irq_base_id;
};

/* Per-subsystem device state (one per minor number). */
struct rdbg_data {
struct device *device;
struct completion work;      /* completed by the subsystem's interrupt */
struct gpio_info in;         /* incoming-interrupt GPIO/IRQ */
struct gpio_info out;        /* outgoing-interrupt GPIO bank */
bool device_initialized;
int gpio_out_offset;         /* rotating offset into the out GPIO bank */
bool device_opened;
void *smem_addr;             /* base of the shared-memory region */
size_t smem_size;
struct smq producer_smrb;    /* AP -> subsystem queue */
struct smq consumer_smrb;    /* subsystem -> AP queue */
struct mutex write_mutex;    /* serializes producer-side operations */
};

/* Driver-global char-device bookkeeping. */
struct rdbg_device {
struct cdev cdev;
struct class *class;
dev_t dev_no;
int num_devices;
struct rdbg_data *rdbg_data;  /* array of num_devices entries */
};
/* Singleton device instance; rdbg_data is allocated at init time. */
static struct rdbg_device g_rdbg_instance = {
{ {0} },
NULL,
0,
SMP2P_NUM_PROCS,
NULL
};
/* Static description of each remote processor's rdbg SMEM buffer. */
struct processor_specific_info {
char *name;                     /* device-node name; NULL = unsupported */
unsigned int smem_buffer_addr;  /* SMEM item id (0 = not debug-capable) */
size_t smem_buffer_size;
};

/* Indexed by SMP2P processor id == device minor number. */
static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
{0}, /*APPS*/
{"rdbg_modem", 0, 0}, /*MODEM*/
{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/
{0}, /*SMP2P_RESERVED_PROC_1*/
{"rdbg_wcnss", 0, 0}, /*WCNSS*/
{"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024}, /*CDSP*/
{NULL}, /*SMP2P_POWER_PROC*/
{NULL}, /*SMP2P_TZ_PROC*/
{NULL}, /*EMPTY*/
{NULL}, /*EMPTY*/
{NULL}, /*EMPTY*/
{NULL}, /*EMPTY*/
{NULL}, /*EMPTY*/
{NULL}, /*EMPTY*/
{NULL}, /*EMPTY*/
{NULL} /*SMP2P_REMOTE_MOCK_PROC*/
};
/*
 * Allocate n contiguous free blocks from the producer's private block map.
 *
 * On success, stores the index of the first block in *block_index, marks
 * the run as used (each map entry of a run holds the remaining run length:
 * n, n-1, ..., 1) and returns SMQ_SUCCESS. Returns SMQ_ENOMEMORY when no
 * suitable run exists.
 *
 * The scan starts at index_read (a rotating hint left by the previous
 * allocation) and wraps around the map at most once.
 */
static int smq_blockmap_get(struct smq_block_map *block_map,
		uint32_t *block_index, uint32_t n)
{
	uint32_t start;
	uint32_t mark = 0;
	uint32_t found = 0;
	uint32_t i = 0;

	start = block_map->index_read;

	if (n == 1) {
		/* Fast path: any single free slot will do. */
		do {
			if (!block_map->map[block_map->index_read]) {
				*block_index = block_map->index_read;
				block_map->map[block_map->index_read] = 1;
				block_map->index_read++;
				block_map->index_read %= block_map->num_blocks;
				return SMQ_SUCCESS;
			}
			block_map->index_read++;
		} while (start != (block_map->index_read %=
			block_map->num_blocks));
	} else {
		/* mark == num_blocks means "no candidate run in progress". */
		mark = block_map->num_blocks;
		do {
			if (!block_map->map[block_map->index_read]) {
				if (mark > block_map->index_read) {
					/* First free block of a new
					 * candidate run. */
					mark = block_map->index_read;
					start = block_map->index_read;
					found = 0;
				}
				found++;
				if (found == n) {
					*block_index = mark;
					/* Tag every block of the run with
					 * its remaining length so
					 * smq_blockmap_put() can free it. */
					for (i = 0; i < n; i++)
						block_map->map[mark + i] =
							(uint8_t)(n - i);
					block_map->index_read += block_map->map
						[block_map->index_read] - 1;
					return SMQ_SUCCESS;
				}
			} else {
				/* Used block: skip to the end of its run
				 * and restart the candidate search. */
				found = 0;
				block_map->index_read += block_map->map
					[block_map->index_read] - 1;
				mark = block_map->num_blocks;
			}
			block_map->index_read++;
		} while (start != (block_map->index_read %=
			block_map->num_blocks));
	}

	return SMQ_ENOMEMORY;
}
  192. static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
  193. {
  194. uint32_t num_blocks = block_map->map[i];
  195. while (num_blocks--) {
  196. block_map->map[i] = 0;
  197. i++;
  198. }
  199. }
  200. static int smq_blockmap_reset(struct smq_block_map *block_map)
  201. {
  202. if (!block_map->map)
  203. return SMQ_ENOMEMORY;
  204. memset(block_map->map, 0, block_map->num_blocks + 1);
  205. block_map->index_read = 0;
  206. return SMQ_SUCCESS;
  207. }
  208. static int smq_blockmap_ctor(struct smq_block_map *block_map,
  209. uint32_t num_blocks)
  210. {
  211. if (num_blocks <= 1)
  212. return SMQ_ENOMEMORY;
  213. block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
  214. if (!block_map->map)
  215. return SMQ_ENOMEMORY;
  216. block_map->num_blocks = num_blocks - 1;
  217. smq_blockmap_reset(block_map);
  218. return SMQ_SUCCESS;
  219. }
  220. static void smq_blockmap_dtor(struct smq_block_map *block_map)
  221. {
  222. kfree(block_map->map);
  223. block_map->map = NULL;
  224. }
/*
 * Return a message buffer obtained from smq_receive() to the producer by
 * appending its block index to the consumer's free ring.
 *
 * Returns SMQ_SUCCESS, SMQ_UNDERFLOW if the remote producer side looks
 * uninitialized, or SMQ_EBADPARM if data does not point into the block area.
 */
static int smq_free(struct smq *smq, void *data)
{
	struct smq_node node;
	uint32_t index_block;
	int err = SMQ_SUCCESS;

	if (smq->lock)
		mutex_lock(smq->lock);

	/* NOTE(review): '&&' rejects only when BOTH the version and the
	 * magic are wrong; '||' (either check failing) may have been
	 * intended — confirm against the SMQ protocol before changing. */
	if ((smq->hdr->producer_version != SM_VERSION) &&
		(smq->out->s.init != SMQ_MAGIC_PRODUCER)) {
		err = SMQ_UNDERFLOW;
		goto bail;
	}

	/* Translate the buffer pointer back into a block index and bounds-
	 * check it before touching shared memory. */
	index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
	if (index_block >= smq->num_blocks) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	node.index_block = (uint16_t)index_block;
	node.num_blocks = 0;
	*((struct smq_node *)(smq->in->free + smq->in->
		s.index_free_write)) = node;

	smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
		% smq->num_blocks;

bail:
	if (smq->lock)
		mutex_unlock(smq->lock);
	return err;
}
/*
 * Pop the next message from the consumer queue.
 *
 * On success, *pp points at the message's first block inside shared memory
 * (caller must hand it back via smq_free()), *pnsize is the byte length
 * rounded up to whole blocks, and *pbmore is 1 if more messages are queued.
 *
 * Returns SMQ_SUCCESS, SMQ_UNDERFLOW when the producer side looks
 * uninitialized or the queue is empty, or SMQ_EBADPARM for a corrupt node.
 */
static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
{
	struct smq_node *node;
	int err = SMQ_SUCCESS;
	int more = 0;

	/* NOTE(review): same '&&' validity check as smq_free() — rejects
	 * only when both version and magic are wrong; verify intent. */
	if ((smq->hdr->producer_version != SM_VERSION) &&
		(smq->out->s.init != SMQ_MAGIC_PRODUCER))
		return SMQ_UNDERFLOW;

	/* Read index catching up to write index means the queue is empty. */
	if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
		err = SMQ_UNDERFLOW;
		goto bail;
	}

	node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
	if (node->index_block >= smq->num_blocks) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
		% smq->num_blocks;

	*pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
	*pnsize = SM_BLOCKSIZE * node->num_blocks;

	/*
	 * Ensure that the reads and writes are updated in the memory
	 * when they are done and not cached. Also, ensure that the reads
	 * and writes are not reordered as they are shared between two cores.
	 */
	rmb();
	if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
		more = 1;

bail:
	*pbmore = more;
	return err;
}
/*
 * Copy nsize bytes from user space (pcb) into freshly allocated queue
 * blocks and publish them on the producer's sent ring.
 *
 * Before allocating, drains the consumer's free ring to reclaim blocks the
 * remote side has finished with. Returns SMQ_SUCCESS, an SMQ_* error, or
 * the positive copy_from_user() remainder on a faulting user buffer.
 */
static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
{
	void *pv = 0;
	int num_blocks;
	uint32_t index_block = 0;
	int err = SMQ_SUCCESS;
	struct smq_node *node = NULL;

	mutex_lock(smq->lock);

	/* Only reclaim when the consumer is live and has acked the current
	 * reset generation; otherwise its indices can't be trusted. */
	if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) &&
		(smq->hdr->consumer_version == SM_VERSION)) {
		if (smq->out->s.index_check_queue_for_reset ==
			smq->in->s.index_check_queue_for_reset_ack) {
			while (smq->out->s.index_free_read !=
				smq->in->s.index_free_write) {
				node = (struct smq_node *)(
					smq->in->free +
					smq->out->s.index_free_read);
				if (node->index_block >= smq->num_blocks) {
					err = SMQ_EBADPARM;
					goto bail;
				}

				smq->out->s.index_free_read =
					(smq->out->s.index_free_read + 1)
					% smq->num_blocks;

				smq_blockmap_put(&smq->block_map,
					node->index_block);
				/*
				 * Ensure that the reads and writes are
				 * updated in the memory when they are done
				 * and not cached. Also, ensure that the reads
				 * and writes are not reordered as they are
				 * shared between two cores.
				 */
				rmb();
			}
		}
	}

	num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
	err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
	if (err != SMQ_SUCCESS)
		goto bail;

	pv = smq->blocks + (SM_BLOCKSIZE * index_block);

	err = copy_from_user((void *)pv, (void *)pcb, nsize);
	if (err != 0)
		goto bail;

	/* Publish the node first, then advance the write index so the
	 * consumer never sees a half-written entry. */
	((struct smq_node *)(smq->out->sent +
		smq->out->s.index_sent_write))->index_block
			= (uint16_t)index_block;
	((struct smq_node *)(smq->out->sent +
		smq->out->s.index_sent_write))->num_blocks
			= (uint16_t)num_blocks;

	smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
		% smq->num_blocks;

bail:
	if (err != SMQ_SUCCESS) {
		/* pv non-NULL means blocks were claimed; give them back. */
		if (pv)
			smq_blockmap_put(&smq->block_map, index_block);
	}
	mutex_unlock(smq->lock);
	return err;
}
/*
 * Reset the producer-side ring state for reset generation reset_num.
 *
 * No-op (returns 0) unless smq is a PRODUCER and the generation is new;
 * returns 1 after performing a reset. Called from smq_check_queue_reset()
 * when the consumer observes a new producer reset generation.
 */
static int smq_reset_producer_queue_internal(struct smq *smq,
		uint32_t reset_num)
{
	int retval = 0;
	uint32_t i;

	if (smq->type != PRODUCER)
		goto bail;

	mutex_lock(smq->lock);
	if (smq->out->s.index_check_queue_for_reset != reset_num) {
		smq->out->s.index_check_queue_for_reset = reset_num;
		/* 0xFFFF poisons stale nodes so the consumer's bounds check
		 * (index_block >= num_blocks) rejects them. */
		for (i = 0; i < smq->num_blocks; i++)
			(smq->out->sent + i)->index_block = 0xFFFF;

		smq_blockmap_reset(&smq->block_map);
		smq->out->s.index_sent_write = 0;
		smq->out->s.index_free_read = 0;
		retval = 1;
	}
	mutex_unlock(smq->lock);

bail:
	return retval;
}
/*
 * Detect a remote producer reset and resynchronize both local queues.
 *
 * If the remote side bumped index_check_queue_for_reset (it re-ran its
 * producer ctor), ack the new generation, reinitialize the local consumer
 * ring, and reset the local producer via
 * smq_reset_producer_queue_internal(). Returns that helper's result, or 0
 * when nothing needed doing.
 */
static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
{
	int retval = 0;
	uint32_t reset_num, i;

	if ((p_cons->type != CONSUMER) ||
		(p_cons->out->s.init != SMQ_MAGIC_PRODUCER) ||
		(p_cons->hdr->producer_version != SM_VERSION))
		goto bail;

	reset_num = p_cons->out->s.index_check_queue_for_reset;
	if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
		p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
		/* Poison stale free-ring nodes (see producer reset). */
		for (i = 0; i < p_cons->num_blocks; i++)
			(p_cons->in->free + i)->index_block = 0xFFFF;

		p_cons->in->s.index_sent_read = 0;
		p_cons->in->s.index_free_write = 0;

		retval = smq_reset_producer_queue_internal(p_prod, reset_num);
	}

bail:
	return retval;
}
/*
 * Probe whether the remote subsystem's debug agent is up, without
 * constructing a queue: walk the same SMEM layout smq_ctor() would build
 * for this half and check that the consumer magic has been written.
 *
 * Returns 0 if enabled, SMQ_EBADPARM for an undersized region, or -ECOMM
 * when the remote consumer never initialized.
 */
static int check_subsystem_debug_enabled(void *base_addr, int size)
{
	int num_blocks;
	uint8_t *pb_orig;
	uint8_t *pb;
	struct smq smq;
	int err = 0;

	pb = pb_orig = (uint8_t *)base_addr;
	pb += sizeof(struct smq_hdr);
	pb = PTR_ALIGN(pb, 8);
	size -= pb - (uint8_t *)pb_orig;

	/* Same capacity formula as smq_ctor(): per block we need the data
	 * block itself plus one sent node and one free node. */
	num_blocks = (int)((size - sizeof(struct smq_out_state) -
		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
		sizeof(struct smq_node) * 2));
	if (num_blocks <= 0) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	pb += num_blocks * SM_BLOCKSIZE;
	smq.out = (struct smq_out *)pb;
	pb += sizeof(struct smq_out_state) + (num_blocks *
		sizeof(struct smq_node));
	smq.in = (struct smq_in *)pb;

	if (smq.in->s.init != SMQ_MAGIC_CONSUMER) {
		pr_err("%s, smq in consumer not initialized", __func__);
		err = -ECOMM;
	}

bail:
	return err;
}
  418. static void smq_dtor(struct smq *smq)
  419. {
  420. if (smq->initialized == SMQ_MAGIC_INIT) {
  421. switch (smq->type) {
  422. case PRODUCER:
  423. smq->out->s.init = 0;
  424. smq_blockmap_dtor(&smq->block_map);
  425. break;
  426. case CONSUMER:
  427. smq->in->s.init = 0;
  428. break;
  429. default:
  430. case INVALID:
  431. break;
  432. }
  433. smq->initialized = 0;
  434. }
  435. }
  436. /*
  437. * The shared memory is used as a circular ring buffer in each direction.
  438. * Thus we have a bi-directional shared memory channel between the AP
  439. * and a subsystem. We call this SMQ. Each memory channel contains a header,
  440. * data and a control mechanism that is used to synchronize read and write
  441. * of data between the AP and the remote subsystem.
  442. *
  443. * Overall SMQ memory view:
  444. *
  445. * +------------------------------------------------+
  446. * | SMEM buffer |
  447. * |-----------------------+------------------------|
  448. * |Producer: LA | Producer: Remote |
  449. * |Consumer: Remote | subsystem |
  450. * | subsystem | Consumer: LA |
  451. * | | |
  452. * | Producer| Consumer|
  453. * +-----------------------+------------------------+
  454. * | |
  455. * | |
  456. * | +--------------------------------------+
  457. * | |
  458. * | |
  459. * v v
  460. * +--------------------------------------------------------------+
  461. * | Header | Data | Control |
  462. * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
  463. * | | b | b | b | | S |n |n | | S |n |n | |
  464. * | Producer | l | l | l | | M |o |o | | M |o |o | |
  465. * | Ver | o | o | o | | Q |d |d | | Q |d |d | |
  466. * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... |
  467. * | | k | k | k | | O | | | | I | | | |
  468. * | Consumer | | | | | u |0 |1 | | n |0 |1 | |
  469. * | Ver | 0 | 1 | 2 | | t | | | | | | | |
  470. * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
  471. * | |
  472. * + |
  473. * |
  474. * +------------------------+
  475. * |
  476. * v
  477. * +----+----+----+----+
  478. * | SMQ Nodes |
  479. * |----|----|----|----|
  480. * Node # | 0 | 1 | 2 | ...|
  481. * |----|----|----|----|
  482. * Starting Block Index # | 0 | 3 | 8 | ...|
  483. * |----|----|----|----|
  484. * # of blocks | 3 | 5 | 1 | ...|
  485. * +----+----+----+----+
  486. *
  487. * Header: Contains version numbers for software compatibility to ensure
  488. * that both producers and consumers on the AP and subsystems know how to
  489. * read from and write to the queue.
  490. * Both the producer and consumer versions are 1.
  491. * +---------+-------------------+
  492. * | Size | Field |
  493. * +---------+-------------------+
  494. * | 1 byte | Producer Version |
  495. * +---------+-------------------+
  496. * | 1 byte | Consumer Version |
  497. * +---------+-------------------+
  498. *
  499. * Data: The data portion contains multiple blocks [0..N] of a fixed size.
  500. * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
  501. * Payload sent from the debug agent app is split (if necessary) and placed
  502. * in these blocks. The first data block is placed at the next 8 byte aligned
  503. * address after the header.
  504. *
  505. * The number of blocks for a given SMEM allocation is derived as follows:
  506. * Number of Blocks = ((Total Size - Alignment - Size of Header
  507. * - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
  508. *
  509. * The producer maintains a private block map of each of these blocks to
  510. * determine which of these blocks in the queue is available and which are free.
  511. *
  512. * Control:
  513. * The control portion contains a list of nodes [0..N] where N is number
  514. * of available data blocks. Each node identifies the data
  515. * block indexes that contain a particular debug message to be transferred,
  516. * and the number of blocks it took to hold the contents of the message.
  517. *
  518. * Each node has the following structure:
  519. * +---------+-------------------+
  520. * | Size | Field |
  521. * +---------+-------------------+
 * | 2 bytes |Starting Block Index|
  523. * +---------+-------------------+
  524. * | 2 bytes |Number of Blocks |
  525. * +---------+-------------------+
  526. *
  527. * The producer and the consumer update different parts of the control channel
  528. * (SMQOut / SMQIn) respectively. Each of these control data structures contains
  529. * information about the last node that was written / read, and the actual nodes
  530. * that were written/read.
  531. *
  532. * SMQOut Structure (R/W by producer, R by consumer):
  533. * +---------+-------------------+
  534. * | Size | Field |
  535. * +---------+-------------------+
  536. * | 4 bytes | Magic Init Number |
  537. * +---------+-------------------+
  538. * | 4 bytes | Reset |
  539. * +---------+-------------------+
  540. * | 4 bytes | Last Sent Index |
  541. * +---------+-------------------+
  542. * | 4 bytes | Index Free Read |
  543. * +---------+-------------------+
  544. *
  545. * SMQIn Structure (R/W by consumer, R by producer):
  546. * +---------+-------------------+
  547. * | Size | Field |
  548. * +---------+-------------------+
  549. * | 4 bytes | Magic Init Number |
  550. * +---------+-------------------+
  551. * | 4 bytes | Reset ACK |
  552. * +---------+-------------------+
  553. * | 4 bytes | Last Read Index |
  554. * +---------+-------------------+
  555. * | 4 bytes | Index Free Write |
  556. * +---------+-------------------+
  557. *
  558. * Magic Init Number:
  559. * Both SMQ Out and SMQ In initialize this field with a predefined magic
  560. * number so as to make sure that both the consumer and producer blocks
  561. * have fully initialized and have valid data in the shared memory control area.
  562. * Producer Magic #: 0xFF00FF01
  563. * Consumer Magic #: 0xFF00FF02
  564. */
/*
 * Construct one direction of the shared-memory queue over [base_addr,
 * base_addr + size): header, 8-byte-aligned data blocks, SMQOut control
 * ring, SMQIn control ring (layout documented in the big comment above).
 *
 * type selects PRODUCER or CONSUMER behavior; lock_ptr is retained only
 * for producers. Returns SMQ_SUCCESS, SMQ_EBADPARM for a reused smq or
 * bad arguments, or SMQ_ENOMEMORY when the region is too small.
 */
static int smq_ctor(struct smq *smq, void *base_addr, int size,
		enum smq_type type, struct mutex *lock_ptr)
{
	int num_blocks;
	uint8_t *pb_orig;
	uint8_t *pb;
	uint32_t i;
	int err;

	if (smq->initialized == SMQ_MAGIC_INIT) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	if (!base_addr || !size) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	if (type == PRODUCER)
		smq->lock = lock_ptr;

	pb_orig = (uint8_t *)base_addr;
	smq->hdr = (struct smq_hdr *)pb_orig;
	pb = pb_orig;
	pb += sizeof(struct smq_hdr);
	/* First data block starts at the next 8-byte boundary. */
	pb = PTR_ALIGN(pb, 8);
	size -= pb - (uint8_t *)pb_orig;
	/* Per block: the data block plus one sent node and one free node. */
	num_blocks = (int)((size - sizeof(struct smq_out_state) -
		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
		sizeof(struct smq_node) * 2));
	if (num_blocks <= 0) {
		err = SMQ_ENOMEMORY;
		goto bail;
	}

	smq->blocks = pb;
	smq->num_blocks = num_blocks;
	pb += num_blocks * SM_BLOCKSIZE;
	smq->out = (struct smq_out *)pb;
	pb += sizeof(struct smq_out_state) + (num_blocks *
		sizeof(struct smq_node));
	smq->in = (struct smq_in *)pb;
	smq->type = type;

	if (type == PRODUCER) {
		smq->hdr->producer_version = SM_VERSION;
		/* Poison all nodes; consumer bounds-check rejects 0xFFFF. */
		for (i = 0; i < smq->num_blocks; i++)
			(smq->out->sent + i)->index_block = 0xFFFF;

		err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
		if (err != SMQ_SUCCESS)
			goto bail;

		smq->out->s.index_sent_write = 0;
		smq->out->s.index_free_read = 0;
		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
			/* Re-construction over a live queue: bump the reset
			 * generation so the consumer resynchronizes. */
			smq->out->s.index_check_queue_for_reset += 1;
		} else {
			smq->out->s.index_check_queue_for_reset = 1;
			smq->out->s.init = SMQ_MAGIC_PRODUCER;
		}
	} else {
		smq->hdr->consumer_version = SM_VERSION;
		for (i = 0; i < smq->num_blocks; i++)
			(smq->in->free + i)->index_block = 0xFFFF;

		smq->in->s.index_sent_read = 0;
		smq->in->s.index_free_write = 0;
		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
			/* Producer already up: ack its current generation. */
			smq->in->s.index_check_queue_for_reset_ack =
				smq->out->s.index_check_queue_for_reset;
		} else {
			smq->in->s.index_check_queue_for_reset_ack = 0;
		}

		smq->in->s.init = SMQ_MAGIC_CONSUMER;
	}

	smq->initialized = SMQ_MAGIC_INIT;
	err = SMQ_SUCCESS;

bail:
	return err;
}
  638. static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
  639. {
  640. int offset = rdbgdata->gpio_out_offset;
  641. int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
  642. gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
  643. rdbgdata->gpio_out_offset = (offset + 1) % 32;
  644. dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
  645. __func__, val);
  646. }
  647. static irqreturn_t on_interrupt_from(int irq, void *ptr)
  648. {
  649. struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
  650. dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
  651. __func__, irq);
  652. complete(&(rdbgdata->work));
  653. return IRQ_HANDLED;
  654. }
  655. static int initialize_smq(struct rdbg_data *rdbgdata)
  656. {
  657. int err = 0;
  658. unsigned char *smem_consumer_buffer = rdbgdata->smem_addr;
  659. smem_consumer_buffer += (rdbgdata->smem_size/2);
  660. if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
  661. ((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
  662. dev_err(rdbgdata->device, "%s: smq producer allocation failed",
  663. __func__);
  664. err = -ENOMEM;
  665. goto bail;
  666. }
  667. if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer,
  668. ((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
  669. dev_err(rdbgdata->device, "%s: smq conmsumer allocation failed",
  670. __func__);
  671. err = -ENOMEM;
  672. }
  673. bail:
  674. return err;
  675. }
/*
 * open() handler: locate the per-subsystem SMEM buffer, verify the remote
 * debug agent is alive, wire up the incoming interrupt, and construct both
 * queues. Minor number selects the subsystem (indexes proc_info[]).
 *
 * Errors unwind in reverse order via the goto ladder below.
 *
 * NOTE(review): the device_opened check/set is not serialized against a
 * concurrent open of the same minor — confirm whether an external lock
 * covers this path.
 */
static int rdbg_open(struct inode *inode, struct file *filp)
{
	int device_id = -1;
	struct rdbg_device *device = &g_rdbg_instance;
	struct rdbg_data *rdbgdata = NULL;
	int err = 0;

	if (!inode || !device->rdbg_data) {
		pr_err("Memory not allocated yet");
		err = -ENODEV;
		goto bail;
	}

	device_id = MINOR(inode->i_rdev);
	rdbgdata = &device->rdbg_data[device_id];

	if (rdbgdata->device_opened) {
		dev_err(rdbgdata->device, "%s: Device already opened",
			__func__);
		err = -EEXIST;
		goto bail;
	}

	/* smem_buffer_size == 0 marks a subsystem with no rdbg support. */
	rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
	if (!rdbgdata->smem_size) {
		dev_err(rdbgdata->device, "%s: smem not initialized", __func__);
		err = -ENOMEM;
		goto bail;
	}

	rdbgdata->smem_addr = smem_find(proc_info[device_id].smem_buffer_addr,
		rdbgdata->smem_size, 0, SMEM_ANY_HOST_FLAG);
	if (!rdbgdata->smem_addr) {
		dev_err(rdbgdata->device, "%s: Could not allocate smem memory",
			__func__);
		err = -ENOMEM;
		goto bail;
	}
	dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d",
		__func__, (unsigned long)rdbgdata->smem_addr,
		(unsigned int)rdbgdata->smem_size);

	/* The second half of the region is the remote side's consumer
	 * block; its magic tells us the remote debug agent is running. */
	if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
		rdbgdata->smem_size/2)) {
		dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled",
			__func__, proc_info[device_id].name);
		err = -ECOMM;
		goto bail;
	}

	init_completion(&rdbgdata->work);

	err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			proc_info[device_id].name,
			(void *)&device->rdbg_data[device_id]);
	if (err) {
		dev_err(rdbgdata->device,
			"%s: Failed to register interrupt.Err=%d,irqid=%d.",
			__func__, err, rdbgdata->in.irq_base_id);
		goto irq_bail;
	}

	/* Wake capability is best-effort; failure is logged, not fatal. */
	err = enable_irq_wake(rdbgdata->in.irq_base_id);
	if (err < 0) {
		dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d",
			err);
		err = 0;
	}

	mutex_init(&rdbgdata->write_mutex);

	err = initialize_smq(rdbgdata);
	if (err) {
		dev_err(rdbgdata->device, "Error initializing smq. Err=%d",
			err);
		goto smq_bail;
	}

	rdbgdata->device_opened = 1;

	filp->private_data = (void *)rdbgdata;

	return 0;

smq_bail:
	smq_dtor(&(rdbgdata->producer_smrb));
	smq_dtor(&(rdbgdata->consumer_smrb));
	mutex_destroy(&rdbgdata->write_mutex);
irq_bail:
	free_irq(rdbgdata->in.irq_base_id, (void *)
		&device->rdbg_data[device_id]);
bail:
	return err;
}
/*
 * release() handler: undo rdbg_open() — wake any blocked reader, release
 * the IRQ, destroy whichever queues were constructed, and drop the mutex.
 * Idempotent with respect to an already-closed device.
 */
static int rdbg_release(struct inode *inode, struct file *filp)
{
	int device_id = -1;
	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
	struct rdbg_data *rdbgdata = NULL;
	int err = 0;

	if (!inode || !rdbgdevice->rdbg_data) {
		pr_err("Memory not allocated yet");
		err = -ENODEV;
		goto bail;
	}

	device_id = MINOR(inode->i_rdev);
	rdbgdata = &rdbgdevice->rdbg_data[device_id];

	if (rdbgdata->device_opened == 1) {
		dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__,
			proc_info[device_id].name);
		rdbgdata->device_opened = 0;
		/* Unblock a reader stuck in wait_for_completion. */
		complete(&(rdbgdata->work));
		free_irq(rdbgdata->in.irq_base_id, (void *)
			&rdbgdevice->rdbg_data[device_id]);
		if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
				producer_smrb));
		if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
				consumer_smrb));
		mutex_destroy(&rdbgdata->write_mutex);
	}

	filp->private_data = NULL;

bail:
	return err;
}
  788. static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
  789. loff_t *offset)
  790. {
  791. int err = 0;
  792. struct rdbg_data *rdbgdata = filp->private_data;
  793. void *p_sent_buffer = NULL;
  794. int nsize = 0;
  795. int more = 0;
  796. if (!rdbgdata) {
  797. pr_err("Invalid argument");
  798. err = -EINVAL;
  799. goto bail;
  800. }
  801. dev_dbg(rdbgdata->device, "%s: In receive", __func__);
  802. err = wait_for_completion_interruptible(&(rdbgdata->work));
  803. if (err) {
  804. dev_err(rdbgdata->device, "%s: Error in wait", __func__);
  805. goto bail;
  806. }
  807. smq_check_queue_reset(&(rdbgdata->consumer_smrb),
  808. &(rdbgdata->producer_smrb));
  809. if (smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer,
  810. &nsize, &more) != SMQ_SUCCESS) {
  811. dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d",
  812. __func__, err);
  813. err = -ENODATA;
  814. goto bail;
  815. }
  816. size = ((size < nsize) ? size : nsize);
  817. err = copy_to_user(buf, p_sent_buffer, size);
  818. if (err != 0) {
  819. dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d",
  820. __func__, err);
  821. err = -ENODATA;
  822. goto bail;
  823. }
  824. smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
  825. err = size;
  826. dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx",
  827. __func__, (unsigned long) buf);
  828. bail:
  829. return err;
  830. }
  831. static ssize_t rdbg_write(struct file *filp, const char __user *buf,
  832. size_t size, loff_t *offset)
  833. {
  834. int err = 0;
  835. int num_retries = 0;
  836. struct rdbg_data *rdbgdata = filp->private_data;
  837. if (!rdbgdata) {
  838. pr_err("Invalid argument");
  839. err = -EINVAL;
  840. goto bail;
  841. }
  842. do {
  843. err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size);
  844. dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.",
  845. __func__, err);
  846. } while (err != 0 && num_retries++ < MAX_RETRIES);
  847. if (err != 0) {
  848. err = -ECOMM;
  849. goto bail;
  850. }
  851. send_interrupt_to_subsystem(rdbgdata);
  852. err = size;
  853. bail:
  854. return err;
  855. }
/* Character-device callbacks installed on every rdbg minor node. */
static const struct file_operations rdbg_fops = {
	.open = rdbg_open,
	.read = rdbg_read,
	.write = rdbg_write,
	.release = rdbg_release,
};
  862. static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
  863. {
  864. struct device_node *node = NULL;
  865. int cnt = 0;
  866. int id = 0;
  867. node = of_find_compatible_node(NULL, NULL, node_name);
  868. if (node) {
  869. cnt = of_gpio_count(node);
  870. if (cnt && gpio_info_ptr) {
  871. id = of_get_gpio(node, 0);
  872. gpio_info_ptr->gpio_base_id = id;
  873. gpio_info_ptr->irq_base_id = gpio_to_irq(id);
  874. return 0;
  875. }
  876. }
  877. return -EINVAL;
  878. }
/*
 * Module init: allocate per-processor rdbg state, register a char-device
 * region, and create one /dev/<name> node per processor that has a
 * matching smp2p device-tree entry.  Error paths unwind in strict reverse
 * order via the goto ladder at the bottom.
 */
static int __init rdbg_init(void)
{
	int err = 0;
	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
	int minor = 0;
	int major = 0;
	int minor_nodes_created = 0;
	char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_";
	/* "xx_out" is the widest suffix formatted below ("%d_in"/"%d_out"
	 * with a minor < SMP2P_NUM_PROCS); snprintf is bounded by max_len. */
	int max_len = strlen(rdbg_compatible_string) + strlen("xx_out");
	char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);

	if (!node_name) {
		err = -ENOMEM;
		goto bail;
	}
	if (rdbgdevice->num_devices < 1 ||
		rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
		pr_err("rgdb: invalid num_devices");
		err = -EDOM;
		goto name_bail;
	}
	rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
		sizeof(struct rdbg_data), GFP_KERNEL);
	if (!rdbgdevice->rdbg_data) {
		err = -ENOMEM;
		goto name_bail;
	}
	err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
		rdbgdevice->num_devices, "rdbgctl");
	if (err) {
		pr_err("Error in alloc_chrdev_region.");
		goto data_bail;
	}
	major = MAJOR(rdbgdevice->dev_no);
	cdev_init(&rdbgdevice->cdev, &rdbg_fops);
	rdbgdevice->cdev.owner = THIS_MODULE;
	err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
		rdbgdevice->num_devices);
	if (err) {
		pr_err("Error in cdev_add");
		goto chrdev_bail;
	}
	rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
	if (IS_ERR(rdbgdevice->class)) {
		err = PTR_ERR(rdbgdevice->class);
		pr_err("Error in class_create");
		goto cdev_bail;
	}
	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
		/* Processors with no name table entry are not supported. */
		if (!proc_info[minor].name)
			continue;
		if (snprintf(node_name, max_len, "%s%d_in",
			rdbg_compatible_string, minor) <= 0) {
			pr_err("Error in snprintf");
			err = -ENOMEM;
			goto device_bail;
		}
		/* A missing "_in" entry is not fatal: this processor is
		 * simply skipped. */
		if (register_smp2p(node_name,
			&rdbgdevice->rdbg_data[minor].in)) {
			pr_debug("No incoming device tree entry found for %s",
				proc_info[minor].name);
			continue;
		}
		if (snprintf(node_name, max_len, "%s%d_out",
			rdbg_compatible_string, minor) <= 0) {
			pr_err("Error in snprintf");
			err = -ENOMEM;
			goto device_bail;
		}
		/* An "_in" entry without its "_out" counterpart IS fatal. */
		if (register_smp2p(node_name,
			&rdbgdevice->rdbg_data[minor].out)) {
			pr_err("No outgoing device tree entry found for %s",
				proc_info[minor].name);
			err = -EINVAL;
			goto device_bail;
		}
		rdbgdevice->rdbg_data[minor].device = device_create(
			rdbgdevice->class, NULL, MKDEV(major, minor),
			NULL, "%s", proc_info[minor].name);
		if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
			err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
			pr_err("Error in device_create");
			goto device_bail;
		}
		rdbgdevice->rdbg_data[minor].device_initialized = 1;
		minor_nodes_created++;
		dev_dbg(rdbgdevice->rdbg_data[minor].device,
			"%s: created /dev/%s c %d %d'", __func__,
			proc_info[minor].name, major, minor);
	}
	if (!minor_nodes_created) {
		pr_err("No device tree entries found");
		err = -EINVAL;
		goto class_bail;
	}
	/* Success: only the scratch node_name buffer is freed. */
	goto name_bail;
device_bail:
	/* Destroy only the device nodes created before the failure. */
	for (--minor; minor >= 0; minor--) {
		if (rdbgdevice->rdbg_data[minor].device_initialized)
			device_destroy(rdbgdevice->class,
				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
	}
class_bail:
	class_destroy(rdbgdevice->class);
cdev_bail:
	cdev_del(&rdbgdevice->cdev);
chrdev_bail:
	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
data_bail:
	kfree(rdbgdevice->rdbg_data);
name_bail:
	kfree(node_name);
bail:
	return err;
}
  993. static void __exit rdbg_exit(void)
  994. {
  995. struct rdbg_device *rdbgdevice = &g_rdbg_instance;
  996. int minor;
  997. for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
  998. if (rdbgdevice->rdbg_data[minor].device_initialized) {
  999. device_destroy(rdbgdevice->class,
  1000. MKDEV(MAJOR(rdbgdevice->dev_no), minor));
  1001. }
  1002. }
  1003. class_destroy(rdbgdevice->class);
  1004. cdev_del(&rdbgdevice->cdev);
  1005. unregister_chrdev_region(rdbgdevice->dev_no, 1);
  1006. kfree(rdbgdevice->rdbg_data);
  1007. }
/* Module entry/exit hooks and metadata. */
module_init(rdbg_init);
module_exit(rdbg_exit);
MODULE_DESCRIPTION("rdbg module");
MODULE_LICENSE("GPL v2");