mtk-afe-fe-dai.c

/*
 * mtk-afe-fe-dais.c  --  Mediatek afe fe dai operator
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/soc.h>

#include "mtk-afe-fe-dai.h"
#include "mtk-base-afe.h"

#define AFE_BASE_END_OFFSET 8
static int mtk_regmap_update_bits(struct regmap *map, int reg,
				  unsigned int mask,
				  unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_update_bits(map, reg, mask, val);
}

static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_write(map, reg, val);
}
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int memif_num = rtd->cpu_dai->id;
	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
	int ret;

	memif->substream = substream;

	snd_pcm_hw_constraint_step(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);

	/* enable agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       0 << memif->data->agent_disable_shift);

	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);

	/*
	 * Capture cannot use a ping-pong buffer since hw_ptr at IRQ time may
	 * be smaller than period_size due to the AFE's internal buffer.
	 * This easily leads to overrun when avail_min is period_size.
	 * One more period can hold the possibly unread data.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		int periods_max = mtk_afe_hardware->periods_max;

		ret = snd_pcm_hw_constraint_minmax(runtime,
						   SNDRV_PCM_HW_PARAM_PERIODS,
						   3, periods_max);
		if (ret < 0) {
			dev_err(afe->dev, "hw_constraint_minmax failed\n");
			return ret;
		}
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");

	/* dynamically allocate an IRQ to this memif */
	if (memif->irq_usage < 0) {
		int irq_id = mtk_dynamic_irq_acquire(afe);

		if (irq_id != afe->irqs_size) {
			/* link */
			memif->irq_usage = irq_id;
		} else {
			dev_err(afe->dev, "%s() error: no more asys irq\n",
				__func__);
			ret = -EBUSY;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);
void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int irq_id;

	irq_id = memif->irq_usage;

	/* disable agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       1 << memif->data->agent_disable_shift);

	if (!memif->const_irq) {
		mtk_dynamic_irq_release(afe, irq_id);
		memif->irq_usage = -1;
		memif->substream = NULL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int msb_at_bit33 = 0;
	int ret, fs = 0;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
	memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
	memif->buffer_size = substream->runtime->dma_bytes;

	/* start address */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 memif->phys_buf_addr);
	/* end address */
	mtk_regmap_write(afe->regmap,
			 memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
			 memif->phys_buf_addr + memif->buffer_size - 1);

	/* set MSB to 33-bit */
	mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
			       1 << memif->data->msb_shift,
			       msb_at_bit33 << memif->data->msb_shift);

	/* set channel */
	if (memif->data->mono_shift >= 0) {
		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				       1 << memif->data->mono_shift,
				       mono << memif->data->mono_shift);
	}

	/* set rate */
	if (memif->data->fs_shift < 0)
		return 0;

	fs = afe->memif_fs(substream, params_rate(params));
	if (fs < 0)
		return -EINVAL;

	mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
			       memif->data->fs_maskbit << memif->data->fs_shift,
			       fs << memif->data->fs_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);
int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	return snd_pcm_lib_free_pages(substream);
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
	unsigned int counter = runtime->period_size;
	int fs;

	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (memif->data->enable_shift >= 0)
			mtk_regmap_update_bits(afe->regmap,
					       memif->data->enable_reg,
					       1 << memif->data->enable_shift,
					       1 << memif->data->enable_shift);

		/* set irq counter */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
				       irq_data->irq_cnt_maskbit
				       << irq_data->irq_cnt_shift,
				       counter << irq_data->irq_cnt_shift);

		/* set irq fs */
		fs = afe->irq_fs(substream, runtime->rate);
		if (fs < 0)
			return -EINVAL;

		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
				       irq_data->irq_fs_maskbit
				       << irq_data->irq_fs_shift,
				       fs << irq_data->irq_fs_shift);

		/* enable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       1 << irq_data->irq_en_shift);
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				       1 << memif->data->enable_shift, 0);

		/* disable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       0 << irq_data->irq_en_shift);

		/* and clear pending IRQ */
		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
				 1 << irq_data->irq_clr_shift);
		return 0;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);
int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int hd_audio = 0;

	/* set hd mode */
	switch (substream->runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		hd_audio = 1;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, substream->runtime->format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       1 << memif->data->hd_shift,
			       hd_audio << memif->data->hd_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);
const struct snd_soc_dai_ops mtk_afe_fe_ops = {
	.startup	= mtk_afe_fe_startup,
	.shutdown	= mtk_afe_fe_shutdown,
	.hw_params	= mtk_afe_fe_hw_params,
	.hw_free	= mtk_afe_fe_hw_free,
	.prepare	= mtk_afe_fe_prepare,
	.trigger	= mtk_afe_fe_trigger,
};
EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);

static DEFINE_MUTEX(irqs_lock);
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
{
	int i;

	mutex_lock(&afe->irq_alloc_lock);
	for (i = 0; i < afe->irqs_size; ++i) {
		if (afe->irqs[i].irq_occupyed == 0) {
			afe->irqs[i].irq_occupyed = 1;
			mutex_unlock(&afe->irq_alloc_lock);
			return i;
		}
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return afe->irqs_size;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);

int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
{
	mutex_lock(&afe->irq_alloc_lock);
	if (irq_id >= 0 && irq_id < afe->irqs_size) {
		afe->irqs[irq_id].irq_occupyed = 0;
		mutex_unlock(&afe->irq_alloc_lock);
		return 0;
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);
int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || afe->suspended)
		return 0;

	if (!afe->reg_back_up)
		afe->reg_back_up =
			devm_kcalloc(dev, afe->reg_back_up_list_num,
				     sizeof(unsigned int), GFP_KERNEL);

	for (i = 0; i < afe->reg_back_up_list_num; i++)
		regmap_read(regmap, afe->reg_back_up_list[i],
			    &afe->reg_back_up[i]);

	afe->suspended = true;
	afe->runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);

int mtk_afe_dai_resume(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i = 0;

	if (pm_runtime_status_suspended(dev) || !afe->suspended)
		return 0;

	afe->runtime_resume(dev);

	if (!afe->reg_back_up)
		dev_dbg(dev, "%s no reg_backup\n", __func__);

	for (i = 0; i < afe->reg_back_up_list_num; i++)
		mtk_regmap_write(regmap, afe->reg_back_up_list[i],
				 afe->reg_back_up[i]);

	afe->suspended = false;
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);
MODULE_DESCRIPTION("Mediatek simple fe dai operator");
MODULE_AUTHOR("Garlic Tseng <[email protected]>");
MODULE_LICENSE("GPL v2");
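
Usage note: the exported mtk_afe_fe_ops and the suspend/resume helpers are meant to be referenced from a MediaTek platform driver's front-end DAI table (drivers such as mt8173-afe-pcm.c follow this pattern). Below is a minimal sketch of that wiring; it is not part of this file, and the DAI name, id and stream capabilities are illustrative assumptions only.

/*
 * Sketch only: a hypothetical front-end DAI entry in a platform driver.
 * The id must index the matching afe->memif[] entry; the names, rates
 * and formats here are placeholders, not values taken from this file.
 */
static struct snd_soc_dai_driver example_fe_dais[] = {
	{
		.name = "DL1",			/* hypothetical FE DAI name */
		.id = 0,			/* index into afe->memif[] */
		.suspend = mtk_afe_dai_suspend,
		.resume = mtk_afe_dai_resume,
		.playback = {
			.stream_name = "DL1",
			.channels_min = 1,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE |
				   SNDRV_PCM_FMTBIT_S24_LE |
				   SNDRV_PCM_FMTBIT_S32_LE,
		},
		.ops = &mtk_afe_fe_ops,		/* hooks startup/hw_params/trigger/... */
	},
};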