spi-geni-qcom.c

/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/ipc_logging.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/msm_gpi.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-geni-qcom.h>

#define SPI_NUM_CHIPSELECT	(4)
#define SPI_XFER_TIMEOUT_MS	(250)
#define SPI_AUTO_SUSPEND_DELAY	(250)

/* SPI SE specific registers */
#define SE_SPI_CPHA		(0x224)
#define SE_SPI_LOOPBACK		(0x22C)
#define SE_SPI_CPOL		(0x230)
#define SE_SPI_DEMUX_OUTPUT_INV	(0x24C)
#define SE_SPI_DEMUX_SEL	(0x250)
#define SE_SPI_TRANS_CFG	(0x25C)
#define SE_SPI_WORD_LEN		(0x268)
#define SE_SPI_TX_TRANS_LEN	(0x26C)
#define SE_SPI_RX_TRANS_LEN	(0x270)
#define SE_SPI_PRE_POST_CMD_DLY	(0x274)
#define SE_SPI_DELAY_COUNTERS	(0x278)

/* SE_SPI_CPHA register fields */
#define CPHA			(BIT(0))

/* SE_SPI_LOOPBACK register fields */
#define LOOPBACK_ENABLE		(0x1)
#define NORMAL_MODE		(0x0)
#define LOOPBACK_MSK		(GENMASK(1, 0))

/* SE_SPI_CPOL register fields */
#define CPOL			(BIT(2))

/* SE_SPI_DEMUX_OUTPUT_INV register fields */
#define CS_DEMUX_OUTPUT_INV_MSK	(GENMASK(3, 0))

/* SE_SPI_DEMUX_SEL register fields */
#define CS_DEMUX_OUTPUT_SEL	(GENMASK(3, 0))

/* SE_SPI_TRANS_CFG register fields */
#define CS_TOGGLE		(BIT(0))

/* SE_SPI_WORD_LEN register fields */
#define WORD_LEN_MSK		(GENMASK(9, 0))
#define MIN_WORD_LEN		(4)

/* SE_SPI_TX_TRANS_LEN and SE_SPI_RX_TRANS_LEN fields */
#define TRANS_LEN_MSK		(GENMASK(23, 0))

/* SE_SPI_DELAY_COUNTERS fields */
#define SPI_INTER_WORDS_DELAY_MSK	(GENMASK(9, 0))
#define SPI_CS_CLK_DELAY_MSK		(GENMASK(19, 10))
#define SPI_CS_CLK_DELAY_SHFT		(10)

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY		(1)
#define SPI_RX_ONLY		(2)
#define SPI_FULL_DUPLEX		(3)
#define SPI_TX_RX		(7)
#define SPI_CS_ASSERT		(8)
#define SPI_CS_DEASSERT		(9)
#define SPI_SCK_ONLY		(10)

/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY	BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define FRAGMENTATION		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_CMD_DELAY		BIT(4)

#define SPI_CORE2X_VOTE		(10000)

/* GSI CONFIG0 TRE params: flags bit fields */
#define GSI_LOOPBACK_EN		(BIT(0))
#define GSI_CS_TOGGLE		(BIT(3))
#define GSI_CPHA		(BIT(4))
#define GSI_CPOL		(BIT(5))

#define MAX_TX_SG		(3)
#define NUM_SPI_XFER		(8)

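/*
 * Each GSI transfer is described by a short chain of TREs (transfer
 * ring elements): an optional CONFIG0 TRE carrying clock/word-length
 * settings, a GO TRE issuing the SPI command, and DMA TREs pointing at
 * the data buffers. One spi_geni_gsi instance below tracks that state
 * per transfer, for up to NUM_SPI_XFER transfers in flight per message.
 */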
struct gsi_desc_cb {
	struct spi_master *spi;
	struct spi_transfer *xfer;
};

struct spi_geni_gsi {
	struct msm_gpi_tre config0_tre;
	struct msm_gpi_tre go_tre;
	struct msm_gpi_tre tx_dma_tre;
	struct msm_gpi_tre rx_dma_tre;
	struct scatterlist tx_sg[MAX_TX_SG];
	struct scatterlist rx_sg;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	struct msm_gpi_dma_async_tx_cb_param tx_cb_param;
	struct msm_gpi_dma_async_tx_cb_param rx_cb_param;
	struct dma_async_tx_descriptor *tx_desc;
	struct dma_async_tx_descriptor *rx_desc;
	struct gsi_desc_cb desc_cb;
};

struct spi_geni_master {
	struct se_geni_rsc spi_rsc;
	resource_size_t phys_addr;
	resource_size_t size;
	void __iomem *base;
	int irq;
	struct device *dev;
	int rx_fifo_depth;
	int tx_fifo_depth;
	int tx_fifo_width;
	int tx_wm;
	bool setup;
	u32 cur_speed_hz;
	int cur_word_len;
	unsigned int tx_rem_bytes;
	unsigned int rx_rem_bytes;
	struct spi_transfer *cur_xfer;
	struct completion xfer_done;
	struct device *wrapper_dev;
	int oversampling;
	struct spi_geni_gsi *gsi;
	struct dma_chan *tx;
	struct dma_chan *rx;
	struct msm_gpi_ctrl tx_event;
	struct msm_gpi_ctrl rx_event;
	struct completion tx_cb;
	struct completion rx_cb;
	bool qn_err;
	int cur_xfer_mode;
	int num_tx_eot;
	int num_rx_eot;
	int num_xfers;
	void *ipc;
	bool shared_se;
	bool dis_autosuspend;
};

static struct spi_master *get_spi_master(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spi_master *spi = platform_get_drvdata(pdev);

	return spi;
}

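/*
 * Pick a source clock index and divider for the requested SCLK rate.
 * The divider computed below satisfies sclk_freq = speed_hz *
 * oversampling * clk_div when it divides evenly; for example
 * (illustrative numbers only), a matched 19.2 MHz source with
 * oversampling 1 and a 9.6 MHz request gives clk_div = 2.
 */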
static int get_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas,
			int *clk_idx, int *clk_div)
{
	unsigned long sclk_freq;
	struct se_geni_rsc *rsc = &mas->spi_rsc;
	int ret = 0;

	ret = geni_se_clk_freq_match(&mas->spi_rsc,
				(speed_hz * mas->oversampling), clk_idx,
				&sclk_freq, true);
	if (ret) {
		dev_err(mas->dev, "%s: Failed(%d) to find src clk for 0x%x\n",
						__func__, ret, speed_hz);
		return ret;
	}

	*clk_div = ((sclk_freq / mas->oversampling) / speed_hz);
	if (!(*clk_div)) {
		dev_err(mas->dev, "%s:Err:sclk:%lu oversampling:%d speed:%u\n",
			__func__, sclk_freq, mas->oversampling, speed_hz);
		return -EINVAL;
	}

	dev_dbg(mas->dev, "%s: req %u sclk %lu, idx %d, div %d\n", __func__,
				speed_hz, sclk_freq, *clk_idx, *clk_div);
	ret = clk_set_rate(rsc->se_clk, sclk_freq);
	if (ret)
		dev_err(mas->dev, "%s: clk_set_rate failed %d\n",
							__func__, ret);
	return ret;
}

static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
						int bits_per_word)
{
	int pack_words = 1;
	bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
	u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);
	unsigned long cfg0, cfg1;

	/*
	 * If bits_per_word evenly divides the FIFO width, pack as many SPI
	 * words as fit per FIFO word; otherwise fall back to the default of
	 * 1 SPI word per FIFO word.
	 */
	if (!(mas->tx_fifo_width % bits_per_word))
		pack_words = mas->tx_fifo_width / bits_per_word;
	word_len &= ~WORD_LEN_MSK;
	word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
	se_config_packing(mas->base, bits_per_word, pack_words, msb_first);
	geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
	se_get_packing_config(bits_per_word, pack_words, msb_first,
							&cfg0, &cfg1);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s: cfg0 %lu cfg1 %lu bpw %d pack_words %d\n", __func__,
		cfg0, cfg1, bits_per_word, pack_words);
}

static int setup_fifo_params(struct spi_device *spi_slv,
					struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	u16 mode = spi_slv->mode;
	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
	u32 demux_sel = 0;
	u32 demux_output_inv = 0;
	u32 clk_sel = 0;
	u32 m_clk_cfg = 0;
	int ret = 0;
	int idx;
	int div;
	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
	u32 spi_delay_params = 0;

	loopback_cfg &= ~LOOPBACK_MSK;
	cpol &= ~CPOL;
	cpha &= ~CPHA;

	if (mode & SPI_LOOP)
		loopback_cfg |= LOOPBACK_ENABLE;

	if (mode & SPI_CPOL)
		cpol |= CPOL;

	if (mode & SPI_CPHA)
		cpha |= CPHA;

	if (spi_slv->mode & SPI_CS_HIGH)
		demux_output_inv |= BIT(spi_slv->chip_select);

	if (spi_slv->controller_data) {
		u32 cs_clk_delay = 0;
		u32 inter_words_delay = 0;

		delay_params =
		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
		cs_clk_delay =
		(delay_params->spi_cs_clk_delay << SPI_CS_CLK_DELAY_SHFT)
						& SPI_CS_CLK_DELAY_MSK;
		inter_words_delay =
			delay_params->spi_inter_words_delay &
						SPI_INTER_WORDS_DELAY_MSK;
		spi_delay_params =
			(inter_words_delay | cs_clk_delay);
	}

	demux_sel = spi_slv->chip_select;
	mas->cur_speed_hz = spi_slv->max_speed_hz;
	mas->cur_word_len = spi_slv->bits_per_word;

	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "Err setting clks ret(%d) for %d\n",
							ret, mas->cur_speed_hz);
		goto setup_fifo_params_exit;
	}

	clk_sel |= (idx & CLK_SEL_MSK);
	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
	geni_write_reg(loopback_cfg, mas->base, SE_SPI_LOOPBACK);
	geni_write_reg(demux_sel, mas->base, SE_SPI_DEMUX_SEL);
	geni_write_reg(cpha, mas->base, SE_SPI_CPHA);
	geni_write_reg(cpol, mas->base, SE_SPI_CPOL);
	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
	geni_write_reg(spi_delay_params, mas->base, SE_SPI_DELAY_COUNTERS);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s:Loopback%d demux_sel0x%x demux_op_inv 0x%x clk_cfg 0x%x\n",
		__func__, loopback_cfg, demux_sel, demux_output_inv, m_clk_cfg);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s:clk_sel 0x%x cpol %d cpha %d delay 0x%x\n", __func__,
					clk_sel, cpol, cpha, spi_delay_params);
	/* Ensure message level attributes are written before returning */
	mb();
setup_fifo_params_exit:
	return ret;
}

static int select_xfer_mode(struct spi_master *spi,
				struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int mode = FIFO_MODE;
	int fifo_disable = (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
							FIFO_IF_DISABLE);
	bool dma_chan_valid =
		!(IS_ERR_OR_NULL(mas->tx) || IS_ERR_OR_NULL(mas->rx));

	/*
	 * If the FIFO interface is disabled and no DMA channels are
	 * available, this transfer cannot be done at all. Otherwise prefer
	 * GSI_DMA whenever both DMA channels are valid, and fall back to
	 * FIFO mode only when they are not.
	 */
	if (fifo_disable && !dma_chan_valid)
		mode = -EINVAL;
	else if (dma_chan_valid)
		mode = GSI_DMA;
	else
		mode = FIFO_MODE;
	return mode;
}

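/*
 * CONFIG0 TRE layout, as packed by the MSM_GPI_SPI_CONFIG0_TRE_DWORD*
 * macros below: dword[0] holds packing, mode flags and word length,
 * dword[1] the CS-to-clock and inter-word delays, dword[2] the source
 * clock index and divider, and dword[3] the TRE control bits.
 */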
static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
				struct spi_geni_master *mas,
				struct spi_master *spi, u16 mode,
				u32 cs_clk_delay, u32 inter_words_delay)
{
	struct msm_gpi_tre *c0_tre = &mas->gsi[mas->num_xfers].config0_tre;
	u8 flags = 0;
	u8 word_len = 0;
	u8 pack = 0;
	int div = 0;
	int idx = 0;
	int ret = 0;

	if (IS_ERR_OR_NULL(c0_tre))
		return c0_tre;

	if (mode & SPI_LOOP)
		flags |= GSI_LOOPBACK_EN;

	if (mode & SPI_CPOL)
		flags |= GSI_CPOL;

	if (mode & SPI_CPHA)
		flags |= GSI_CPHA;

	word_len = xfer->bits_per_word - MIN_WORD_LEN;
	pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "%s:Err setting clks:%d\n", __func__, ret);
		return ERR_PTR(ret);
	}
	c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
								word_len);
	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, cs_clk_delay,
							inter_words_delay);
	c0_tre->dword[2] = MSM_GPI_SPI_CONFIG0_TRE_DWORD2(idx, div);
	c0_tre->dword[3] = MSM_GPI_SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 1);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s: flags 0x%x word %d pack %d idx %d div %d\n",
		__func__, flags, word_len, pack, idx, div);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s: cs_clk_delay %d inter_words_delay %d\n", __func__,
					cs_clk_delay, inter_words_delay);
	return c0_tre;
}

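/*
 * GO TRE completion flags: an RX-only command ends the block (EOB)
 * because its length travels in the GO TRE itself, while TX and
 * full-duplex commands are chained into the DMA TREs that follow.
 */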
static struct msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags,
				struct spi_geni_master *mas)
{
	struct msm_gpi_tre *go_tre = &mas->gsi[mas->num_xfers].go_tre;
	int chain;
	int eot;
	int eob;

	if (IS_ERR_OR_NULL(go_tre))
		return go_tre;

	go_tre->dword[0] = MSM_GPI_SPI_GO_TRE_DWORD0(flags, cs, cmd);
	go_tre->dword[1] = MSM_GPI_SPI_GO_TRE_DWORD1;
	go_tre->dword[2] = MSM_GPI_SPI_GO_TRE_DWORD2(rx_len);
	if (cmd == SPI_RX_ONLY) {
		eot = 0;
		chain = 0;
		eob = 1;
	} else {
		eot = 0;
		chain = 1;
		eob = 0;
	}
	go_tre->dword[3] = MSM_GPI_SPI_GO_TRE_DWORD3(0, eot, eob, chain);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
	"%s: rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
		__func__, rx_len, flags, cs, cmd, eot, eob, chain);
	return go_tre;
}

static struct msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre,
				dma_addr_t buf, u32 len,
				struct spi_geni_master *mas,
				bool is_tx)
{
	if (IS_ERR_OR_NULL(tre))
		return tre;

	tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf);
	tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
	tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
	tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, is_tx, 0, 0);
	return tre;
}

static void spi_gsi_ch_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb,
				void *ptr)
{
	struct spi_master *spi = ptr;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	switch (cb->cb_event) {
	case MSM_GPI_QUP_NOTIFY:
	case MSM_GPI_QUP_MAX_EVENT:
		GENI_SE_DBG(mas->ipc, false, mas->dev,
				"%s:cb_ev%d status%llu ts%llu count%llu\n",
				__func__, cb->cb_event, cb->status,
				cb->timestamp, cb->count);
		break;
	case MSM_GPI_QUP_ERROR:
	case MSM_GPI_QUP_CH_ERROR:
	case MSM_GPI_QUP_FW_ERROR:
	case MSM_GPI_QUP_PENDING_EVENT:
	case MSM_GPI_QUP_EOT_DESC_MISMATCH:
	case MSM_GPI_QUP_SW_ERROR:
		GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: cb_ev %d status %llu ts %llu count %llu\n",
				__func__, cb->cb_event, cb->status,
				cb->timestamp, cb->count);
		GENI_SE_ERR(mas->ipc, true, mas->dev,
				"err.routine %u, err.type %u, err.code %u\n",
				cb->error_log.routine,
				cb->error_log.type,
				cb->error_log.error_code);
		mas->qn_err = true;
		complete_all(&mas->tx_cb);
		complete_all(&mas->rx_cb);
		break;
	}
}

static void spi_gsi_rx_callback(void *cb)
{
	struct msm_gpi_dma_async_tx_cb_param *cb_param =
			(struct msm_gpi_dma_async_tx_cb_param *)cb;
	struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
	struct spi_master *spi = desc_cb->spi;
	struct spi_transfer *xfer = desc_cb->xfer;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	if (xfer->rx_buf) {
		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: Unexpected GSI CB error\n", __func__);
			return;
		}
		if (cb_param->length == xfer->len) {
			GENI_SE_DBG(mas->ipc, false, mas->dev,
					"%s\n", __func__);
			complete(&mas->rx_cb);
		} else {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: Length mismatch. Expected %d Callback %d\n",
				__func__, xfer->len, cb_param->length);
		}
	}
}

static void spi_gsi_tx_callback(void *cb)
{
	struct msm_gpi_dma_async_tx_cb_param *cb_param = cb;
	struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
	struct spi_master *spi = desc_cb->spi;
	struct spi_transfer *xfer = desc_cb->xfer;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	if (xfer->tx_buf) {
		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: Unexpected GSI CB error\n", __func__);
			return;
		}
		if (cb_param->length == xfer->len) {
			GENI_SE_DBG(mas->ipc, false, mas->dev,
					"%s\n", __func__);
			complete(&mas->tx_cb);
		} else {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: Length mismatch. Expected %d Callback %d\n",
				__func__, xfer->len, cb_param->length);
		}
	}
}

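/*
 * Queue one transfer on the GSI channels: emit a CONFIG0 TRE only when
 * the speed or word length changed, then the GO TRE and the DMA TREs,
 * pack them into the TX/RX scatterlists, prep and submit descriptors,
 * and kick both channels. Completion is signalled per EOT through the
 * callbacks above; errors are flagged via qn_err from spi_gsi_ch_cb.
 */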
static int setup_gsi_xfer(struct spi_transfer *xfer,
				struct spi_geni_master *mas,
				struct spi_device *spi_slv,
				struct spi_master *spi)
{
	int ret = 0;
	struct msm_gpi_tre *c0_tre = NULL;
	struct msm_gpi_tre *go_tre = NULL;
	struct msm_gpi_tre *tx_tre = NULL;
	struct msm_gpi_tre *rx_tre = NULL;
	struct scatterlist *xfer_tx_sg = mas->gsi[mas->num_xfers].tx_sg;
	struct scatterlist *xfer_rx_sg = &mas->gsi[mas->num_xfers].rx_sg;
	int rx_nent = 0;
	int tx_nent = 0;
	u8 cmd = 0;
	u8 cs = 0;
	u32 rx_len = 0;
	int go_flags = 0;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
	u32 cs_clk_delay = 0;
	u32 inter_words_delay = 0;

	if (spi_slv->controller_data) {
		delay_params =
		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;

		cs_clk_delay =
			delay_params->spi_cs_clk_delay;
		inter_words_delay =
			delay_params->spi_inter_words_delay;
	}

	if ((xfer->bits_per_word != mas->cur_word_len) ||
		(xfer->speed_hz != mas->cur_speed_hz)) {
		mas->cur_word_len = xfer->bits_per_word;
		mas->cur_speed_hz = xfer->speed_hz;
		tx_nent++;
		c0_tre = setup_config0_tre(xfer, mas, spi, spi_slv->mode,
					cs_clk_delay, inter_words_delay);
		if (IS_ERR_OR_NULL(c0_tre)) {
			dev_err(mas->dev, "%s:Err setting c0tre:%d\n",
							__func__, ret);
			return PTR_ERR(c0_tre);
		}
	}

	if (!(mas->cur_word_len % MIN_WORD_LEN)) {
		rx_len = ((xfer->len << 3) / mas->cur_word_len);
	} else {
		int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;

		rx_len = (xfer->len / bytes_per_word);
	}

	if (xfer->tx_buf && xfer->rx_buf) {
		cmd = SPI_FULL_DUPLEX;
		tx_nent += 2;
		rx_nent++;
	} else if (xfer->tx_buf) {
		cmd = SPI_TX_ONLY;
		tx_nent += 2;
		rx_len = 0;
	} else if (xfer->rx_buf) {
		cmd = SPI_RX_ONLY;
		tx_nent++;
		rx_nent++;
	}

	cs |= spi_slv->chip_select;
	if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers) ==
		!xfer->cs_change)
		go_flags |= FRAGMENTATION;
	go_tre = setup_go_tre(cmd, cs, rx_len, go_flags, mas);

	sg_init_table(xfer_tx_sg, tx_nent);
	if (rx_nent)
		sg_init_table(xfer_rx_sg, rx_nent);

	if (c0_tre)
		sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre));

	sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre));
	mas->gsi[mas->num_xfers].desc_cb.spi = spi;
	mas->gsi[mas->num_xfers].desc_cb.xfer = xfer;
	if (cmd & SPI_RX_ONLY) {
		rx_tre = &mas->gsi[mas->num_xfers].rx_dma_tre;
		rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->len, mas, 0);
		if (IS_ERR_OR_NULL(rx_tre)) {
			dev_err(mas->dev, "Err setting up rx tre\n");
			return PTR_ERR(rx_tre);
		}
		sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
		mas->gsi[mas->num_xfers].rx_desc =
			dmaengine_prep_slave_sg(mas->rx,
				&mas->gsi[mas->num_xfers].rx_sg, rx_nent,
				DMA_DEV_TO_MEM, flags);
		if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].rx_desc)) {
			dev_err(mas->dev, "Err setting up rx desc\n");
			return -EIO;
		}
		mas->gsi[mas->num_xfers].rx_desc->callback =
					spi_gsi_rx_callback;
		mas->gsi[mas->num_xfers].rx_desc->callback_param =
					&mas->gsi[mas->num_xfers].rx_cb_param;
		mas->gsi[mas->num_xfers].rx_cb_param.userdata =
					&mas->gsi[mas->num_xfers].desc_cb;
		mas->num_rx_eot++;
	}

	if (cmd & SPI_TX_ONLY) {
		tx_tre = &mas->gsi[mas->num_xfers].tx_dma_tre;
		tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->len, mas, 1);
		if (IS_ERR_OR_NULL(tx_tre)) {
			dev_err(mas->dev, "Err setting up tx tre\n");
			return PTR_ERR(tx_tre);
		}
		sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre));
		mas->num_tx_eot++;
	}

	mas->gsi[mas->num_xfers].tx_desc = dmaengine_prep_slave_sg(mas->tx,
					mas->gsi[mas->num_xfers].tx_sg, tx_nent,
					DMA_MEM_TO_DEV, flags);
	if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].tx_desc)) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		return -EIO;
	}
	mas->gsi[mas->num_xfers].tx_desc->callback = spi_gsi_tx_callback;
	mas->gsi[mas->num_xfers].tx_desc->callback_param =
					&mas->gsi[mas->num_xfers].tx_cb_param;
	mas->gsi[mas->num_xfers].tx_cb_param.userdata =
					&mas->gsi[mas->num_xfers].desc_cb;
	mas->gsi[mas->num_xfers].tx_cookie =
			dmaengine_submit(mas->gsi[mas->num_xfers].tx_desc);
	if (cmd & SPI_RX_ONLY)
		mas->gsi[mas->num_xfers].rx_cookie =
			dmaengine_submit(mas->gsi[mas->num_xfers].rx_desc);
	dma_async_issue_pending(mas->tx);
	if (cmd & SPI_RX_ONLY)
		dma_async_issue_pending(mas->rx);
	mas->num_xfers++;
	return ret;
}

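/*
 * GSI mode cannot use the CPU-visible buffers directly: each transfer's
 * TX/RX buffer is mapped through the wrapper device's IOMMU context so
 * the DMA TREs can reference device addresses (xfer->tx_dma/rx_dma).
 */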
static int spi_geni_map_buf(struct spi_geni_master *mas,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->rx_buf) {
			ret = geni_se_iommu_map_buf(mas->wrapper_dev,
					&xfer->rx_dma, xfer->rx_buf,
					xfer->len, DMA_FROM_DEVICE);
			if (ret) {
				GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: Mapping Rx buffer %d\n", __func__, ret);
				return ret;
			}
		}

		if (xfer->tx_buf) {
			ret = geni_se_iommu_map_buf(mas->wrapper_dev,
					&xfer->tx_dma,
					(void *)xfer->tx_buf,
					xfer->len, DMA_TO_DEVICE);
			if (ret) {
				GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: Mapping Tx buffer %d\n", __func__, ret);
				return ret;
			}
		}
	}
	return 0;
}

static void spi_geni_unmap_buf(struct spi_geni_master *mas,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->rx_buf)
			geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->rx_dma,
						xfer->len, DMA_FROM_DEVICE);
		if (xfer->tx_buf)
			geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->tx_dma,
						xfer->len, DMA_TO_DEVICE);
	}
}

static int spi_geni_prepare_message(struct spi_master *spi,
					struct spi_message *spi_msg)
{
	int ret = 0;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
	if (mas->cur_xfer_mode == FIFO_MODE) {
		geni_se_select_mode(mas->base, FIFO_MODE);
		reinit_completion(&mas->xfer_done);
		ret = setup_fifo_params(spi_msg->spi, spi);
	} else if (mas->cur_xfer_mode == GSI_DMA) {
		mas->num_tx_eot = 0;
		mas->num_rx_eot = 0;
		mas->num_xfers = 0;
		reinit_completion(&mas->tx_cb);
		reinit_completion(&mas->rx_cb);
		memset(mas->gsi, 0,
				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
		geni_se_select_mode(mas->base, GSI_DMA);
		ret = spi_geni_map_buf(mas, spi_msg);
	} else {
		dev_err(mas->dev, "%s: Couldn't select mode %d\n", __func__,
							mas->cur_xfer_mode);
		ret = -EINVAL;
	}
	return ret;
}

static int spi_geni_unprepare_message(struct spi_master *spi_mas,
					struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);

	mas->cur_speed_hz = 0;
	mas->cur_word_len = 0;
	if (mas->cur_xfer_mode == GSI_DMA)
		spi_geni_unmap_buf(mas, spi_msg);
	return 0;
}

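/*
 * One-time controller setup is done lazily here, on the first prepare
 * once the SE is powered, rather than at probe: read the FIFO geometry,
 * request the GSI DMA channels (FIFO mode stays the fallback if they
 * are unavailable), and read the QUP HW version to choose the
 * oversampling factor.
 */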
static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret = 0, count = 0;
	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
	struct se_geni_rsc *rsc = &mas->spi_rsc;

	/* Adjust the AB/IB based on the max speed of the slave. */
	rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
	rsc->ab = max_speed * DEFAULT_BUS_WIDTH;
	if (mas->shared_se) {
		struct se_geni_rsc *rsc;
		int ret = 0;

		rsc = &mas->spi_rsc;
		ret = pinctrl_select_state(rsc->geni_pinctrl,
						rsc->geni_gpio_active);
		if (ret)
			GENI_SE_ERR(mas->ipc, false, NULL,
			"%s: Error %d pinctrl_select_state\n", __func__, ret);
	}

	ret = pm_runtime_get_sync(mas->dev);
	if (ret < 0) {
		dev_err(mas->dev, "%s:Error enabling SE resources %d\n",
							__func__, ret);
		pm_runtime_put_noidle(mas->dev);
		goto exit_prepare_transfer_hardware;
	} else {
		ret = 0;
	}
	if (mas->dis_autosuspend) {
		count = atomic_read(&mas->dev->power.usage_count);
		if (count <= 0)
			GENI_SE_ERR(mas->ipc, false, NULL,
				"resume usage count mismatch:%d", count);
	}
	if (unlikely(!mas->setup)) {
		int proto = get_se_proto(mas->base);
		unsigned int major;
		unsigned int minor;
		unsigned int step;
		int hw_ver;

		if (unlikely(proto != SPI)) {
			dev_err(mas->dev, "Invalid proto %d\n", proto);
			return -ENXIO;
		}
		geni_se_init(mas->base, 0x0, (mas->tx_fifo_depth - 2));
		mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
		mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
		mas->tx_fifo_width = get_tx_fifo_width(mas->base);
		mas->oversampling = 1;
		/* Transmit an entire FIFO worth of data per IRQ */
		mas->tx_wm = 1;

		mas->tx = dma_request_slave_channel(mas->dev, "tx");
		if (IS_ERR_OR_NULL(mas->tx)) {
			dev_info(mas->dev, "Failed to get tx DMA ch %ld",
							PTR_ERR(mas->tx));
		} else {
			mas->rx = dma_request_slave_channel(mas->dev, "rx");
			if (IS_ERR_OR_NULL(mas->rx)) {
				dev_info(mas->dev, "Failed to get rx DMA ch %ld",
							PTR_ERR(mas->rx));
				dma_release_channel(mas->tx);
			}
			mas->gsi = devm_kzalloc(mas->dev,
				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER),
				GFP_KERNEL);
			if (IS_ERR_OR_NULL(mas->gsi)) {
				dev_err(mas->dev, "Failed to get GSI mem\n");
				dma_release_channel(mas->tx);
				dma_release_channel(mas->rx);
				mas->tx = NULL;
				mas->rx = NULL;
				goto setup_ipc;
			}
			mas->tx_event.init.callback = spi_gsi_ch_cb;
			mas->tx_event.init.cb_param = spi;
			mas->tx_event.cmd = MSM_GPI_INIT;
			mas->tx->private = &mas->tx_event;
			mas->rx_event.init.callback = spi_gsi_ch_cb;
			mas->rx_event.init.cb_param = spi;
			mas->rx_event.cmd = MSM_GPI_INIT;
			mas->rx->private = &mas->rx_event;
			if (dmaengine_slave_config(mas->tx, NULL)) {
				dev_err(mas->dev, "Failed to Config Tx\n");
				dma_release_channel(mas->tx);
				dma_release_channel(mas->rx);
				mas->tx = NULL;
				mas->rx = NULL;
				goto setup_ipc;
			}
			if (dmaengine_slave_config(mas->rx, NULL)) {
				dev_err(mas->dev, "Failed to Config Rx\n");
				dma_release_channel(mas->tx);
				dma_release_channel(mas->rx);
				mas->tx = NULL;
				mas->rx = NULL;
				goto setup_ipc;
			}
		}
setup_ipc:
		mas->ipc = ipc_log_context_create(4, dev_name(mas->dev), 0);
		dev_info(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
			mas->tx_fifo_depth, mas->rx_fifo_depth,
			mas->tx_fifo_width);
		mas->setup = true;
		hw_ver = geni_se_qupv3_hw_version(mas->wrapper_dev, &major,
							&minor, &step);
		if (hw_ver)
			dev_err(mas->dev, "%s:Err getting HW version %d\n",
							__func__, hw_ver);
		else {
			if ((major == 1) && (minor == 0))
				mas->oversampling = 2;
			GENI_SE_DBG(mas->ipc, false, mas->dev,
				"%s:Major:%d Minor:%d step:%dos%d\n",
				__func__, major, minor, step, mas->oversampling);
		}
		mas->shared_se =
			(geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
							FIFO_IF_DISABLE);
		if (mas->dis_autosuspend)
			GENI_SE_DBG(mas->ipc, false, mas->dev,
					"Auto Suspend is disabled\n");
	}
	if (mas->dis_autosuspend)
		dmaengine_resume(mas->tx);
exit_prepare_transfer_hardware:
	return ret;
}

static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int count = 0;

	if (mas->shared_se) {
		struct se_geni_rsc *rsc;
		int ret = 0;

		rsc = &mas->spi_rsc;
		ret = pinctrl_select_state(rsc->geni_pinctrl,
						rsc->geni_gpio_sleep);
		if (ret)
			GENI_SE_ERR(mas->ipc, false, NULL,
			"%s: Error %d pinctrl_select_state\n", __func__, ret);
	}

	if (mas->dis_autosuspend) {
		dmaengine_pause(mas->tx);
		pm_runtime_put_sync(mas->dev);
		count = atomic_read(&mas->dev->power.usage_count);
		if (count < 0)
			GENI_SE_ERR(mas->ipc, false, NULL,
				"suspend usage count mismatch:%d", count);
	} else {
		pm_runtime_mark_last_busy(mas->dev);
		pm_runtime_put_autosuspend(mas->dev);
	}
	return 0;
}

static void setup_fifo_xfer(struct spi_transfer *xfer,
				struct spi_geni_master *mas, u16 mode,
				struct spi_master *spi)
{
	u32 m_cmd = 0;
	u32 m_param = 0;
	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
	u32 trans_len = 0;

	if (xfer->bits_per_word != mas->cur_word_len) {
		spi_setup_word_len(mas, mode, xfer->bits_per_word);
		mas->cur_word_len = xfer->bits_per_word;
	}

	/* Speed and bits per word can be overridden per transfer */
	if (xfer->speed_hz != mas->cur_speed_hz) {
		int ret = 0;
		u32 clk_sel = 0;
		u32 m_clk_cfg = 0;
		int idx = 0;
		int div = 0;

		ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
		if (ret) {
			dev_err(mas->dev, "%s:Err setting clks:%d\n",
								__func__, ret);
			return;
		}
		mas->cur_speed_hz = xfer->speed_hz;
		clk_sel |= (idx & CLK_SEL_MSK);
		m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
		geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
		geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
	}

	mas->tx_rem_bytes = 0;
	mas->rx_rem_bytes = 0;
	if (xfer->tx_buf && xfer->rx_buf)
		m_cmd = SPI_FULL_DUPLEX;
	else if (xfer->tx_buf)
		m_cmd = SPI_TX_ONLY;
	else if (xfer->rx_buf)
		m_cmd = SPI_RX_ONLY;

	spi_tx_cfg &= ~CS_TOGGLE;
	if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers) ==
		!xfer->cs_change)
		m_param |= FRAGMENTATION;

	if (!(mas->cur_word_len % MIN_WORD_LEN)) {
		trans_len =
			((xfer->len << 3) / mas->cur_word_len) & TRANS_LEN_MSK;
	} else {
		int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;

		trans_len = (xfer->len / bytes_per_word) & TRANS_LEN_MSK;
	}
	mas->cur_xfer = xfer;
	if (m_cmd & SPI_TX_ONLY) {
		mas->tx_rem_bytes = xfer->len;
		geni_write_reg(trans_len, mas->base, SE_SPI_TX_TRANS_LEN);
	}
	if (m_cmd & SPI_RX_ONLY) {
		geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}
	geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
	geni_setup_m_cmd(mas->base, m_cmd, m_param);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
	"%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs %d\n",
		__func__, trans_len, xfer->len, spi_tx_cfg, m_cmd,
						xfer->cs_change);
	if (m_cmd & SPI_TX_ONLY)
		geni_write_reg(mas->tx_wm, mas->base, SE_GENI_TX_WATERMARK_REG);
	/* Ensure all writes are done before the WM interrupt */
	mb();
}

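/*
 * Timeout recovery follows the GENI command sequence: try a cancel
 * first and, only if the cancel itself times out, escalate to an
 * abort. Each step waits on xfer_done, which the IRQ handler completes
 * on M_CMD_CANCEL_EN/M_CMD_ABORT_EN.
 */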
static void handle_fifo_timeout(struct spi_geni_master *mas)
{
	unsigned long timeout;

	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
	reinit_completion(&mas->xfer_done);
	geni_cancel_m_cmd(mas->base);
	geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
	/* Ensure cmd cancel is written */
	mb();
	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
	if (!timeout) {
		reinit_completion(&mas->xfer_done);
		geni_abort_m_cmd(mas->base);
		/* Ensure cmd abort is written */
		mb();
		timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
		if (!timeout)
			dev_err(mas->dev,
				"Failed to cancel/abort m_cmd\n");
	}
}

static int spi_geni_transfer_one(struct spi_master *spi,
				struct spi_device *slv,
				struct spi_transfer *xfer)
{
	int ret = 0;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	unsigned long timeout;

	if ((xfer->tx_buf == NULL) && (xfer->rx_buf == NULL)) {
		dev_err(mas->dev, "Invalid xfer both tx rx are NULL\n");
		return -EINVAL;
	}

	if (mas->cur_xfer_mode == FIFO_MODE) {
		setup_fifo_xfer(xfer, mas, slv->mode, spi);
		timeout = wait_for_completion_timeout(&mas->xfer_done,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
		if (!timeout) {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
				"Xfer[len %d tx %pK rx %pK n %d] timed out.\n",
						xfer->len, xfer->tx_buf,
						xfer->rx_buf,
						xfer->bits_per_word);
			mas->cur_xfer = NULL;
			ret = -ETIMEDOUT;
			goto err_fifo_geni_transfer_one;
		}
	} else {
		setup_gsi_xfer(xfer, mas, slv, spi);
		if ((mas->num_xfers >= NUM_SPI_XFER) ||
			(list_is_last(&xfer->transfer_list,
					&spi->cur_msg->transfers))) {
			int i;

			for (i = 0; i < mas->num_tx_eot; i++) {
				timeout = wait_for_completion_timeout(
					&mas->tx_cb,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
				if (!timeout) {
					GENI_SE_ERR(mas->ipc, true, mas->dev,
					"Tx[%d] timeout%lu\n", i, timeout);
					ret = -ETIMEDOUT;
					goto err_gsi_geni_transfer_one;
				}
			}
			for (i = 0; i < mas->num_rx_eot; i++) {
				timeout = wait_for_completion_timeout(
					&mas->rx_cb,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
				if (!timeout) {
					GENI_SE_ERR(mas->ipc, true, mas->dev,
					"Rx[%d] timeout%lu\n", i, timeout);
					ret = -ETIMEDOUT;
					goto err_gsi_geni_transfer_one;
				}
			}
			if (mas->qn_err) {
				ret = -EIO;
				mas->qn_err = false;
				goto err_gsi_geni_transfer_one;
			}
		}
	}
	return ret;
err_gsi_geni_transfer_one:
	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
	dmaengine_terminate_all(mas->tx);
	return ret;
err_fifo_geni_transfer_one:
	handle_fifo_timeout(mas);
	return ret;
}

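/*
 * Worked example of the watermark math below (illustrative numbers
 * only): with a 32-bit wide, 16-word deep TX FIFO, tx_wm = 1 and 8
 * bits per word, up to (16 - 1) * 4 = 60 bytes fit per interrupt.
 */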
static void geni_spi_handle_tx(struct spi_geni_master *mas)
{
	int i = 0;
	int tx_fifo_width = (mas->tx_fifo_width >> 3);
	int max_bytes = 0;
	const u8 *tx_buf = NULL;

	if (!mas->cur_xfer)
		return;

	/*
	 * For non-byte aligned bits-per-word values:
	 * Assumption is that each SPI word will be accommodated in
	 * ceil(bits_per_word / bits_per_byte) bytes
	 * and the next SPI word starts at the next byte.
	 * In such cases, we can fit 1 SPI word per FIFO word so adjust the
	 * max bytes that can be sent per IRQ accordingly.
	 */
	if ((mas->tx_fifo_width % mas->cur_word_len))
		max_bytes = (mas->tx_fifo_depth - mas->tx_wm) *
				((mas->cur_word_len / BITS_PER_BYTE) + 1);
	else
		max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * tx_fifo_width;
	tx_buf = mas->cur_xfer->tx_buf;
	tx_buf += (mas->cur_xfer->len - mas->tx_rem_bytes);
	max_bytes = min_t(int, mas->tx_rem_bytes, max_bytes);
	while (i < max_bytes) {
		int j;
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int bytes_per_fifo = tx_fifo_width;
		int bytes_to_write = 0;

		if ((mas->tx_fifo_width % mas->cur_word_len))
			bytes_per_fifo =
				(mas->cur_word_len / BITS_PER_BYTE) + 1;
		bytes_to_write = min_t(int, (max_bytes - i), bytes_per_fifo);
		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < bytes_to_write; j++)
			fifo_byte[j] = tx_buf[i++];
		geni_write_reg(fifo_word, mas->base, SE_GENI_TX_FIFOn);
		/* Ensure FIFO writes are written in order */
		mb();
	}
	mas->tx_rem_bytes -= max_bytes;
	if (!mas->tx_rem_bytes) {
		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
		/* Barrier here before return to prevent further ISRs */
		mb();
	}
}

static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	int i = 0;
	int fifo_width = (mas->tx_fifo_width >> 3);
	u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
	int rx_bytes = 0;
	int rx_wc = 0;
	u8 *rx_buf = NULL;

	if (!mas->cur_xfer)
		return;

	rx_buf = mas->cur_xfer->rx_buf;
	rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
	if (rx_fifo_status & RX_LAST) {
		int rx_last_byte_valid =
			(rx_fifo_status & RX_LAST_BYTE_VALID_MSK)
					>> RX_LAST_BYTE_VALID_SHFT;
		if (rx_last_byte_valid && (rx_last_byte_valid < 4)) {
			rx_wc -= 1;
			rx_bytes += rx_last_byte_valid;
		}
	}
	if (!(mas->tx_fifo_width % mas->cur_word_len))
		rx_bytes += rx_wc * fifo_width;
	else
		rx_bytes += rx_wc *
			((mas->cur_word_len / BITS_PER_BYTE) + 1);
	rx_bytes = min_t(int, mas->rx_rem_bytes, rx_bytes);
	rx_buf += (mas->cur_xfer->len - mas->rx_rem_bytes);
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int bytes_per_fifo = fifo_width;
		int read_bytes = 0;
		int j;

		if ((mas->tx_fifo_width % mas->cur_word_len))
			bytes_per_fifo =
				(mas->cur_word_len / BITS_PER_BYTE) + 1;
		read_bytes = min_t(int, (rx_bytes - i), bytes_per_fifo);
		fifo_word = geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < read_bytes; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}

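/*
 * The ISR drains the RX FIFO and refills the TX FIFO on watermark
 * interrupts and completes xfer_done on command done/cancel/abort; all
 * handled status bits are acked in one shot via SE_GENI_M_IRQ_CLEAR on
 * exit.
 */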
static irqreturn_t geni_spi_irq(int irq, void *data)
{
	struct spi_geni_master *mas = data;
	u32 m_irq = 0;

	if (pm_runtime_status_suspended(mas->dev)) {
		GENI_SE_DBG(mas->ipc, false, mas->dev,
				"%s: device is suspended\n", __func__);
		goto exit_geni_spi_irq;
	}
	m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
		geni_spi_handle_rx(mas);

	if ((m_irq & M_TX_FIFO_WATERMARK_EN))
		geni_spi_handle_tx(mas);

	if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
		(m_irq & M_CMD_ABORT_EN)) {
		complete(&mas->xfer_done);
		/*
		 * If this happens, then a CMD_DONE came before all the buffer
		 * bytes were sent out. This is unusual, log this condition and
		 * disable the WM interrupt to prevent the system from stalling
		 * due to an interrupt storm.
		 * If this happens when all Rx bytes haven't been received, log
		 * the condition.
		 */
		if (mas->tx_rem_bytes) {
			geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
			GENI_SE_DBG(mas->ipc, false, mas->dev,
				"%s:Premature Done.tx_rem%d bpw%d\n",
				__func__, mas->tx_rem_bytes, mas->cur_word_len);
		}
		if (mas->rx_rem_bytes)
			GENI_SE_DBG(mas->ipc, false, mas->dev,
				"%s:Premature Done.rx_rem%d bpw%d\n",
				__func__, mas->rx_rem_bytes, mas->cur_word_len);
	}
exit_geni_spi_irq:
	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
	return IRQ_HANDLED;
}

static int spi_geni_probe(struct platform_device *pdev)
{
	int ret;
	struct spi_master *spi;
	struct spi_geni_master *geni_mas;
	struct se_geni_rsc *rsc;
	struct resource *res;
	struct platform_device *wrapper_pdev;
	struct device_node *wrapper_ph_node;
	bool rt_pri;

	spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
	if (!spi) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to alloc spi struct\n");
		goto spi_geni_probe_err;
	}

	platform_set_drvdata(pdev, spi);
	geni_mas = spi_master_get_devdata(spi);
	rsc = &geni_mas->spi_rsc;
	geni_mas->dev = &pdev->dev;
	spi->dev.of_node = pdev->dev.of_node;
	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
					"qcom,wrapper-core", 0);
	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
		ret = PTR_ERR(wrapper_ph_node);
		dev_err(&pdev->dev, "No wrapper core defined\n");
		goto spi_geni_probe_err;
	}
	wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
	of_node_put(wrapper_ph_node);
	if (IS_ERR_OR_NULL(wrapper_pdev)) {
		ret = PTR_ERR(wrapper_pdev);
		dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
		goto spi_geni_probe_err;
	}
	geni_mas->wrapper_dev = &wrapper_pdev->dev;
	geni_mas->spi_rsc.wrapper_dev = &wrapper_pdev->dev;
	ret = geni_se_resources_init(rsc, SPI_CORE2X_VOTE,
				     (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
	if (ret) {
		dev_err(&pdev->dev, "Error geni_se_resources_init\n");
		goto spi_geni_probe_err;
	}
	geni_mas->spi_rsc.ctrl_dev = geni_mas->dev;
	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
		dev_err(&pdev->dev, "No pinctrl config specified!\n");
		ret = PTR_ERR(rsc->geni_pinctrl);
		goto spi_geni_probe_err;
	}
	rsc->geni_gpio_active = pinctrl_lookup_state(rsc->geni_pinctrl,
							PINCTRL_DEFAULT);
	if (IS_ERR_OR_NULL(rsc->geni_gpio_active)) {
		dev_err(&pdev->dev, "No default config specified!\n");
		ret = PTR_ERR(rsc->geni_gpio_active);
		goto spi_geni_probe_err;
	}
	rsc->geni_gpio_sleep = pinctrl_lookup_state(rsc->geni_pinctrl,
							PINCTRL_SLEEP);
	if (IS_ERR_OR_NULL(rsc->geni_gpio_sleep)) {
		dev_err(&pdev->dev, "No sleep config specified!\n");
		ret = PTR_ERR(rsc->geni_gpio_sleep);
		goto spi_geni_probe_err;
	}
	rsc->se_clk = devm_clk_get(&pdev->dev, "se-clk");
	if (IS_ERR(rsc->se_clk)) {
		ret = PTR_ERR(rsc->se_clk);
		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
		goto spi_geni_probe_err;
	}
	rsc->m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
	if (IS_ERR(rsc->m_ahb_clk)) {
		ret = PTR_ERR(rsc->m_ahb_clk);
		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
		goto spi_geni_probe_err;
	}
	rsc->s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
	if (IS_ERR(rsc->s_ahb_clk)) {
		ret = PTR_ERR(rsc->s_ahb_clk);
		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
		goto spi_geni_probe_err;
	}
	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
				&spi->max_speed_hz)) {
		dev_err(&pdev->dev, "Max frequency not specified.\n");
		ret = -ENXIO;
		goto spi_geni_probe_err;
	}
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "se_phys");
	if (!res) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "Err getting IO region\n");
		goto spi_geni_probe_err;
	}

	rt_pri = of_property_read_bool(pdev->dev.of_node, "qcom,rt");
	if (rt_pri)
		spi->rt = true;
	geni_mas->dis_autosuspend =
		of_property_read_bool(pdev->dev.of_node,
				"qcom,disable-autosuspend");
	geni_mas->phys_addr = res->start;
	geni_mas->size = resource_size(res);
	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
						resource_size(res));
	if (!geni_mas->base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Err IO mapping iomem\n");
		goto spi_geni_probe_err;
	}

	geni_mas->irq = platform_get_irq(pdev, 0);
	if (geni_mas->irq < 0) {
		dev_err(&pdev->dev, "Err getting IRQ\n");
		ret = geni_mas->irq;
		goto spi_geni_probe_unmap;
	}
	ret = devm_request_irq(&pdev->dev, geni_mas->irq, geni_spi_irq,
			       IRQF_TRIGGER_HIGH, "spi_geni", geni_mas);
	if (ret) {
		dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
				   geni_mas->irq, ret);
		goto spi_geni_probe_unmap;
	}

	spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = SPI_NUM_CHIPSELECT;
	spi->prepare_transfer_hardware = spi_geni_prepare_transfer_hardware;
	spi->prepare_message = spi_geni_prepare_message;
	spi->unprepare_message = spi_geni_unprepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->unprepare_transfer_hardware
			= spi_geni_unprepare_transfer_hardware;
	spi->auto_runtime_pm = false;

	init_completion(&geni_mas->xfer_done);
	init_completion(&geni_mas->tx_cb);
	init_completion(&geni_mas->rx_cb);
	pm_runtime_set_suspended(&pdev->dev);
	if (!geni_mas->dis_autosuspend) {
		pm_runtime_set_autosuspend_delay(&pdev->dev,
					SPI_AUTO_SUSPEND_DELAY);
		pm_runtime_use_autosuspend(&pdev->dev);
	}
	pm_runtime_enable(&pdev->dev);
	ret = spi_register_master(spi);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register SPI master\n");
		goto spi_geni_probe_unmap;
	}
	return ret;
spi_geni_probe_unmap:
	devm_iounmap(&pdev->dev, geni_mas->base);
spi_geni_probe_err:
	spi_master_put(spi);
	return ret;
}

static int spi_geni_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(master);

	spi_unregister_master(master);
	se_geni_resources_off(&geni_mas->spi_rsc);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

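/*
 * When the SE is shared with another execution environment
 * (geni_mas->shared_se), runtime PM gates only the clocks and leaves
 * the remaining resource votes up (pinctrl is handled separately in
 * prepare/unprepare); otherwise the full set of SE resources is turned
 * off and on.
 */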
#ifdef CONFIG_PM
static int spi_geni_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	if (geni_mas->shared_se) {
		ret = se_geni_clks_off(&geni_mas->spi_rsc);
		if (ret)
			GENI_SE_ERR(geni_mas->ipc, false, NULL,
			"%s: Error %d turning off clocks\n", __func__, ret);
	} else {
		ret = se_geni_resources_off(&geni_mas->spi_rsc);
	}
	return ret;
}

static int spi_geni_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	if (geni_mas->shared_se) {
		ret = se_geni_clks_on(&geni_mas->spi_rsc);
		if (ret)
			GENI_SE_ERR(geni_mas->ipc, false, NULL,
			"%s: Error %d turning on clocks\n", __func__, ret);
	} else {
		ret = se_geni_resources_on(&geni_mas->spi_rsc);
	}
	return ret;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_suspend(struct device *dev)
{
	int ret = 0;

	if (!pm_runtime_status_suspended(dev)) {
		struct spi_master *spi = get_spi_master(dev);
		struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

		if (list_empty(&spi->queue) && !spi->cur_msg) {
			GENI_SE_ERR(geni_mas->ipc, true, dev,
					"%s: Force suspend", __func__);
			ret = spi_geni_runtime_suspend(dev);
			if (ret) {
				GENI_SE_ERR(geni_mas->ipc, true, dev,
					"Force suspend Failed:%d", ret);
			} else {
				pm_runtime_disable(dev);
				pm_runtime_set_suspended(dev);
				pm_runtime_enable(dev);
			}
		} else {
			ret = -EBUSY;
		}
	}
	return ret;
}
#else
static int spi_geni_runtime_suspend(struct device *dev)
{
	return 0;
}

static int spi_geni_runtime_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_suspend(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
					spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,spi-geni" },
	{}
};

static struct platform_driver spi_geni_driver = {
	.probe = spi_geni_probe,
	.remove = spi_geni_remove,
	.driver = {
		.name = "spi_geni",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};
module_platform_driver(spi_geni_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_geni");