  1. /* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/interrupt.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/fs.h>
  15. #include <linux/of_irq.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/delay.h>
  18. #include "coresight-byte-cntr.h"
  19. #include "coresight-priv.h"
  20. #include "coresight-tmc.h"
/* TMC-ETR driver state shared with coresight-tmc; set once in byte_cntr_init(). */
static struct tmc_drvdata *tmcdrvdata;
/*
 * Clamp a read of the contiguous ETR buffer.
 *
 * @byte_cntr_data: byte counter state; irq_cnt is decremented once the read
 *                  consumes a full block (or the tail that completes one)
 * @ppos:  current read offset into the trace buffer
 * @bytes: block size programmed into the byte counter
 * @len:   in: caller's requested length; out: clamped length
 * @bufp:  in/out: read pointer, wrapped to the buffer start when it has run
 *         past the end (the trace buffer is circular)
 */
static void tmc_etr_read_bytes(struct byte_cntr *byte_cntr_data, loff_t *ppos,
			       size_t bytes, size_t *len, char **bufp)
{
	/* Wrap the read pointer once it has moved past the buffer end. */
	if (*bufp >= (char *)(tmcdrvdata->vaddr + tmcdrvdata->size))
		*bufp = tmcdrvdata->vaddr;

	/* Hand out at most one block, and never cross a block boundary. */
	if (*len >= bytes)
		*len = bytes;
	else if (((uint32_t)*ppos % bytes) + *len > bytes)
		*len = bytes - ((uint32_t)*ppos % bytes);

	/* Truncate at the physical end of the buffer. */
	if ((*bufp + *len) > (char *)(tmcdrvdata->vaddr +
				      tmcdrvdata->size))
		*len = (char *)(tmcdrvdata->vaddr + tmcdrvdata->size) -
			*bufp;

	/*
	 * This read finishes a block_size chunk: account for one serviced
	 * byte-counter interrupt.
	 */
	if (*len == bytes || (*len + (uint32_t)*ppos) % bytes == 0)
		atomic_dec(&byte_cntr_data->irq_cnt);
}
/*
 * Resolve the read offset *ppos inside the ETR scatter-gather buffer to a
 * virtual address and clamp *len to what may be read from there.
 *
 * SG layout (as the walk below implies): the buffer is described by chained
 * page tables of PAGE_SIZE, each holding ents_per_pg 32-bit entries whose
 * last entry links to the next table — so each table maps (ents_per_pg - 1)
 * data blocks of PAGE_SIZE.
 *
 * @ppos:  read offset into the logical trace buffer
 * @bytes: byte-counter block size
 * @noirq: true when draining after trace capture stopped; the hardware write
 *         pointer (RWP) then bounds the read instead of the block size
 * @len:   in: requested length; out: clamped length
 * @bufpp: out: virtual address to copy trace data from
 */
static void tmc_etr_sg_read_pos(loff_t *ppos,
				size_t bytes, bool noirq, size_t *len,
				char **bufpp)
{
	uint32_t rwp, i = 0;
	uint32_t blk_num, sg_tbl_num, blk_num_loc, read_off;
	uint32_t *virt_pte, *virt_st_tbl;
	void *virt_blk;
	phys_addr_t phys_pte;
	int total_ents = DIV_ROUND_UP(tmcdrvdata->size, PAGE_SIZE);
	int ents_per_pg = PAGE_SIZE/sizeof(uint32_t);

	if (*len == 0)
		return;

	blk_num = *ppos / PAGE_SIZE;
	read_off = *ppos % PAGE_SIZE;

	virt_st_tbl = (uint32_t *)tmcdrvdata->vaddr;

	/* Compute table index and block entry index within that table */
	if (blk_num && (blk_num == (total_ents - 1)) &&
	    !(blk_num % (ents_per_pg - 1))) {
		sg_tbl_num = blk_num / ents_per_pg;
		blk_num_loc = ents_per_pg - 1;
	} else {
		sg_tbl_num = blk_num / (ents_per_pg - 1);
		blk_num_loc = blk_num % (ents_per_pg - 1);
	}

	/* Follow the last-entry links down to the table holding our block. */
	for (i = 0; i < sg_tbl_num; i++) {
		virt_pte = virt_st_tbl + (ents_per_pg - 1);
		phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
		virt_st_tbl = (uint32_t *)phys_to_virt(phys_pte);
	}

	virt_pte = virt_st_tbl + blk_num_loc;
	phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
	virt_blk = phys_to_virt(phys_pte);

	*bufpp = (char *)(virt_blk + read_off);

	if (noirq) {
		/*
		 * Draining: bound the read by the hardware write pointer.
		 * NOTE(review): rwp is compared directly against physical
		 * addresses here — assumes TMC_RWP holds a 32-bit physical
		 * address; confirm against the TMC programming model.
		 */
		rwp = readl_relaxed(tmcdrvdata->base + TMC_RWP);
		tmc_etr_sg_rwp_pos(tmcdrvdata, rwp);
		if (tmcdrvdata->sg_blk_num == blk_num &&
		    rwp >= (phys_pte + read_off))
			*len = rwp - phys_pte - read_off;
		else if (tmcdrvdata->sg_blk_num > blk_num)
			*len = PAGE_SIZE - read_off;
		else
			*len = bytes;
	} else {
		/* Stay within the current page ... */
		if (*len > (PAGE_SIZE - read_off))
			*len = PAGE_SIZE - read_off;

		/* ... and within the current block_size chunk. */
		if (*len >= (bytes - ((uint32_t)*ppos % bytes)))
			*len = bytes - ((uint32_t)*ppos % bytes);

		/* One block consumed -> one serviced interrupt accounted. */
		if (*len == bytes || (*len + (uint32_t)*ppos) % bytes == 0)
			atomic_dec(&tmcdrvdata->byte_cntr->irq_cnt);
	}

	/*
	 * Invalidate cache range before reading. This will make sure that CPU
	 * reads latest contents from DDR
	 */
	dmac_inv_range((void *)(*bufpp), (void *)(*bufpp) + *len);
}
  96. static irqreturn_t etr_handler(int irq, void *data)
  97. {
  98. struct byte_cntr *byte_cntr_data = data;
  99. atomic_inc(&byte_cntr_data->irq_cnt);
  100. wake_up(&byte_cntr_data->wq);
  101. return IRQ_HANDLED;
  102. }
  103. static void tmc_etr_flush_bytes(loff_t *ppos, size_t bytes, size_t *len)
  104. {
  105. uint32_t rwp = 0;
  106. rwp = readl_relaxed(tmcdrvdata->base + TMC_RWP);
  107. if (rwp >= (tmcdrvdata->paddr + *ppos)) {
  108. if (bytes > (rwp - tmcdrvdata->paddr - *ppos))
  109. *len = rwp - tmcdrvdata->paddr - *ppos;
  110. }
  111. }
  112. static ssize_t tmc_etr_byte_cntr_read(struct file *fp, char __user *data,
  113. size_t len, loff_t *ppos)
  114. {
  115. struct byte_cntr *byte_cntr_data = fp->private_data;
  116. char *bufp;
  117. if (!data)
  118. return -EINVAL;
  119. mutex_lock(&byte_cntr_data->byte_cntr_lock);
  120. if (!byte_cntr_data->read_active)
  121. goto err0;
  122. bufp = (char *)(tmcdrvdata->buf + *ppos);
  123. if (byte_cntr_data->enable) {
  124. if (!atomic_read(&byte_cntr_data->irq_cnt)) {
  125. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  126. if (wait_event_interruptible(byte_cntr_data->wq,
  127. atomic_read(&byte_cntr_data->irq_cnt) > 0))
  128. return -ERESTARTSYS;
  129. mutex_lock(&byte_cntr_data->byte_cntr_lock);
  130. if (!byte_cntr_data->read_active)
  131. goto err0;
  132. }
  133. if (tmcdrvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG)
  134. tmc_etr_read_bytes(byte_cntr_data, ppos,
  135. byte_cntr_data->block_size, &len,
  136. &bufp);
  137. else
  138. tmc_etr_sg_read_pos(ppos, byte_cntr_data->block_size, 0,
  139. &len, &bufp);
  140. } else {
  141. if (!atomic_read(&byte_cntr_data->irq_cnt)) {
  142. if (tmcdrvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG)
  143. tmc_etr_flush_bytes(ppos,
  144. byte_cntr_data->block_size,
  145. &len);
  146. else
  147. tmc_etr_sg_read_pos(ppos,
  148. byte_cntr_data->block_size,
  149. 1,
  150. &len, &bufp);
  151. if (!len)
  152. goto err0;
  153. } else {
  154. if (tmcdrvdata->memtype == TMC_ETR_MEM_TYPE_CONTIG)
  155. tmc_etr_read_bytes(byte_cntr_data, ppos,
  156. byte_cntr_data->block_size,
  157. &len, &bufp);
  158. else
  159. tmc_etr_sg_read_pos(ppos,
  160. byte_cntr_data->block_size,
  161. 1,
  162. &len, &bufp);
  163. }
  164. }
  165. if (copy_to_user(data, bufp, len)) {
  166. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  167. dev_dbg(tmcdrvdata->dev, "%s: copy_to_user failed\n", __func__);
  168. return -EFAULT;
  169. }
  170. if (*ppos + len >= tmcdrvdata->size)
  171. *ppos = 0;
  172. else
  173. *ppos += len;
  174. err0:
  175. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  176. return len;
  177. }
  178. void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data)
  179. {
  180. if (!byte_cntr_data)
  181. return;
  182. mutex_lock(&byte_cntr_data->byte_cntr_lock);
  183. if (byte_cntr_data->block_size == 0) {
  184. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  185. return;
  186. }
  187. atomic_set(&byte_cntr_data->irq_cnt, 0);
  188. byte_cntr_data->enable = true;
  189. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  190. }
  191. EXPORT_SYMBOL(tmc_etr_byte_cntr_start);
  192. void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data)
  193. {
  194. if (!byte_cntr_data)
  195. return;
  196. mutex_lock(&byte_cntr_data->byte_cntr_lock);
  197. byte_cntr_data->enable = false;
  198. coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
  199. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  200. }
  201. EXPORT_SYMBOL(tmc_etr_byte_cntr_stop);
  202. static int tmc_etr_byte_cntr_release(struct inode *in, struct file *fp)
  203. {
  204. struct byte_cntr *byte_cntr_data = fp->private_data;
  205. mutex_lock(&byte_cntr_data->byte_cntr_lock);
  206. byte_cntr_data->read_active = false;
  207. coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
  208. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  209. return 0;
  210. }
  211. static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
  212. {
  213. struct byte_cntr *byte_cntr_data =
  214. container_of(in->i_cdev, struct byte_cntr, dev);
  215. mutex_lock(&byte_cntr_data->byte_cntr_lock);
  216. if (!tmcdrvdata->enable || !byte_cntr_data->block_size) {
  217. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  218. return -EINVAL;
  219. }
  220. /* IRQ is a '8- byte' counter and to observe interrupt at
  221. * 'block_size' bytes of data
  222. */
  223. coresight_csr_set_byte_cntr(byte_cntr_data->csr,
  224. (byte_cntr_data->block_size) / 8);
  225. fp->private_data = byte_cntr_data;
  226. nonseekable_open(in, fp);
  227. byte_cntr_data->enable = true;
  228. byte_cntr_data->read_active = true;
  229. mutex_unlock(&byte_cntr_data->byte_cntr_lock);
  230. return 0;
  231. }
/* File operations for the "byte-cntr" character device (non-seekable stream). */
static const struct file_operations byte_cntr_fops = {
	.owner = THIS_MODULE,
	.open = tmc_etr_byte_cntr_open,
	.read = tmc_etr_byte_cntr_read,
	.release = tmc_etr_byte_cntr_release,
	.llseek = no_llseek,
};
  239. static int byte_cntr_register_chardev(struct byte_cntr *byte_cntr_data)
  240. {
  241. int ret;
  242. unsigned int baseminor = 0;
  243. unsigned int count = 1;
  244. struct device *device;
  245. dev_t dev;
  246. ret = alloc_chrdev_region(&dev, baseminor, count, "byte-cntr");
  247. if (ret < 0) {
  248. pr_err("alloc_chrdev_region failed %d\n", ret);
  249. return ret;
  250. }
  251. cdev_init(&byte_cntr_data->dev, &byte_cntr_fops);
  252. byte_cntr_data->dev.owner = THIS_MODULE;
  253. byte_cntr_data->dev.ops = &byte_cntr_fops;
  254. ret = cdev_add(&byte_cntr_data->dev, dev, 1);
  255. if (ret)
  256. goto exit_unreg_chrdev_region;
  257. byte_cntr_data->driver_class = class_create(THIS_MODULE,
  258. "coresight-tmc-etr-stream");
  259. if (IS_ERR(byte_cntr_data->driver_class)) {
  260. ret = -ENOMEM;
  261. pr_err("class_create failed %d\n", ret);
  262. goto exit_unreg_chrdev_region;
  263. }
  264. device = device_create(byte_cntr_data->driver_class, NULL,
  265. byte_cntr_data->dev.dev, byte_cntr_data,
  266. "byte-cntr");
  267. if (IS_ERR(device)) {
  268. pr_err("class_device_create failed %d\n", ret);
  269. ret = -ENOMEM;
  270. goto exit_destroy_class;
  271. }
  272. return 0;
  273. exit_destroy_class:
  274. class_destroy(byte_cntr_data->driver_class);
  275. exit_unreg_chrdev_region:
  276. unregister_chrdev_region(byte_cntr_data->dev.dev, 1);
  277. return ret;
  278. }
  279. struct byte_cntr *byte_cntr_init(struct amba_device *adev,
  280. struct tmc_drvdata *drvdata)
  281. {
  282. struct device *dev = &adev->dev;
  283. struct device_node *np = adev->dev.of_node;
  284. int byte_cntr_irq;
  285. int ret;
  286. struct byte_cntr *byte_cntr_data;
  287. byte_cntr_irq = of_irq_get_byname(np, "byte-cntr-irq");
  288. if (byte_cntr_irq < 0)
  289. return NULL;
  290. byte_cntr_data = devm_kzalloc(dev, sizeof(*byte_cntr_data), GFP_KERNEL);
  291. if (!byte_cntr_data)
  292. return NULL;
  293. ret = devm_request_irq(dev, byte_cntr_irq, etr_handler,
  294. IRQF_TRIGGER_RISING | IRQF_SHARED,
  295. "tmc-etr", byte_cntr_data);
  296. if (ret) {
  297. devm_kfree(dev, byte_cntr_data);
  298. dev_err(dev, "Byte_cntr interrupt registration failed\n");
  299. return NULL;
  300. }
  301. ret = byte_cntr_register_chardev(byte_cntr_data);
  302. if (ret) {
  303. devm_free_irq(dev, byte_cntr_irq, byte_cntr_data);
  304. devm_kfree(dev, byte_cntr_data);
  305. dev_err(dev, "Byte_cntr char dev registration failed\n");
  306. return NULL;
  307. }
  308. tmcdrvdata = drvdata;
  309. byte_cntr_data->byte_cntr_irq = byte_cntr_irq;
  310. byte_cntr_data->csr = drvdata->csr;
  311. atomic_set(&byte_cntr_data->irq_cnt, 0);
  312. init_waitqueue_head(&byte_cntr_data->wq);
  313. mutex_init(&byte_cntr_data->byte_cntr_lock);
  314. return byte_cntr_data;
  315. }
  316. EXPORT_SYMBOL(byte_cntr_init);