spi_qsd.c
/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/spi/qcom-spi.h>
#include <linux/msm-sps.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include "spi_qsd.h"

#define SPI_MAX_BYTES_PER_WORD	(4)

static int msm_spi_pm_resume_runtime(struct device *device);
static int msm_spi_pm_suspend_runtime(struct device *device);
static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
static int get_local_resources(struct msm_spi *dd);
static void put_local_resources(struct msm_spi *dd);

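/*
 * msm_spi_configure_gsbi: set the GSBI wrapper (if any) to SPI mode.
 * The GSBI register space is an optional second MEM resource; on targets
 * where the QUP core is not wrapped in a GSBI it is simply absent, in
 * which case this function is a no-op (the early "return 0" below).
 */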
static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
	return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static int msm_spi_pinctrl_init(struct msm_spi *dd)
{
	dd->pinctrl = devm_pinctrl_get(dd->dev);
	if (IS_ERR_OR_NULL(dd->pinctrl)) {
		dev_err(dd->dev, "Failed to get pin ctrl\n");
		return PTR_ERR(dd->pinctrl);
	}
	dd->pins_active = pinctrl_lookup_state(dd->pinctrl,
				SPI_PINCTRL_STATE_DEFAULT);
	if (IS_ERR_OR_NULL(dd->pins_active)) {
		dev_err(dd->dev, "Failed to lookup pinctrl default state\n");
		return PTR_ERR(dd->pins_active);
	}
	dd->pins_sleep = pinctrl_lookup_state(dd->pinctrl,
				SPI_PINCTRL_STATE_SLEEP);
	if (IS_ERR_OR_NULL(dd->pins_sleep)) {
		dev_err(dd->dev, "Failed to lookup pinctrl sleep state\n");
		return PTR_ERR(dd->pins_sleep);
	}
	return 0;
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i = 0;
	int result = 0;

	if (!dd->pdata->use_pinctrl) {
		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
			if (dd->spi_gpios[i] >= 0) {
				result = gpio_request(dd->spi_gpios[i],
						spi_rsrcs[i]);
				if (result) {
					dev_err(dd->dev,
					"error %d gpio_request for pin %d\n",
					result, dd->spi_gpios[i]);
					goto error;
				}
			}
		}
	} else {
		result = pinctrl_select_state(dd->pinctrl, dd->pins_active);
		if (result) {
			dev_err(dd->dev, "%s: Can not set %s pins\n",
				__func__, SPI_PINCTRL_STATE_DEFAULT);
			goto error;
		}
	}
	return 0;

error:
	if (!dd->pdata->use_pinctrl) {
		for (; --i >= 0;) {
			if (dd->spi_gpios[i] >= 0)
				gpio_free(dd->spi_gpios[i]);
		}
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	if (!dd->pdata->use_pinctrl) {
		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
			if (dd->spi_gpios[i] >= 0)
				gpio_free(dd->spi_gpios[i]);
		}
		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
			if (dd->cs_gpios[i].valid) {
				gpio_free(dd->cs_gpios[i].gpio_num);
				dd->cs_gpios[i].valid = 0;
			}
		}
	} else {
		result = pinctrl_select_state(dd->pinctrl, dd->pins_sleep);
		if (result)
			dev_err(dd->dev, "%s: Can not set %s pins\n",
				__func__, SPI_PINCTRL_STATE_SLEEP);
	}
}

static inline int msm_spi_request_cs_gpio(struct msm_spi *dd)
{
	int cs_num;
	int rc;

	cs_num = dd->spi->chip_select;
	if (!(dd->spi->mode & SPI_LOOP)) {
		if (!dd->pdata->use_pinctrl) {
			if ((!(dd->cs_gpios[cs_num].valid)) &&
				(dd->cs_gpios[cs_num].gpio_num >= 0)) {
				rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
					spi_cs_rsrcs[cs_num]);
				if (rc) {
					dev_err(dd->dev,
					"gpio_request for pin %d failed,error %d\n",
					dd->cs_gpios[cs_num].gpio_num, rc);
					return rc;
				}
				dd->cs_gpios[cs_num].valid = 1;
			}
		}
	}
	return 0;
}

static inline void msm_spi_free_cs_gpio(struct msm_spi *dd)
{
	int cs_num;

	cs_num = dd->spi->chip_select;
	if (!dd->pdata->use_pinctrl) {
		if (dd->cs_gpios[cs_num].valid) {
			gpio_free(dd->cs_gpios[cs_num].gpio_num);
			dd->cs_gpios[cs_num].valid = 0;
		}
	}
}

/**
 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
 * @clk: the clock for which to find the nearest lower rate
 * @rate: clock frequency in Hz
 * @return: nearest lower rate, or a negative error value
 *
 * The public clock API exposes clk_round_rate(), which is a ceiling
 * function. This function is the matching floor function, implemented
 * as a binary search on top of the ceiling function.
 */
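/*
 * Worked example (hypothetical clock that supports 19.2, 25 and 50 MHz;
 * not taken from any real clock plan): for rate = 40 MHz,
 * clk_round_rate(clk, 40 MHz) returns 50 MHz (> rate), so the search
 * starts from lowest_available = 19.2 MHz with step_size =
 * (40 - 19.2) / 2 = 10.4 MHz. The guesses then step up and down, halving
 * the step on each overshoot, and converge on 25 MHz, the nearest rate
 * not above the request.
 */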
static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
{
	long lowest_available, nearest_low, step_size, cur;
	long step_direction = -1;
	long guess = rate;
	int max_steps = 10;

	cur = clk_round_rate(clk, rate);
	if (cur == rate)
		return rate;

	/* if we got here then: cur > rate */
	lowest_available = clk_round_rate(clk, 0);
	if (lowest_available > rate)
		return -EINVAL;

	step_size = (rate - lowest_available) >> 1;
	nearest_low = lowest_available;

	while (max_steps-- && step_size) {
		guess += step_size * step_direction;
		cur = clk_round_rate(clk, guess);

		if ((cur < rate) && (cur > nearest_low))
			nearest_low = cur;
		/*
		 * if we stepped too far, then start stepping in the other
		 * direction with half the step size
		 */
		if (((cur > rate) && (step_direction > 0))
		 || ((cur < rate) && (step_direction < 0))) {
			step_direction = -step_direction;
			step_size >>= 1;
		}
	}
	return nearest_low;
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	long rate;
	int rc;

	rate = msm_spi_clk_max_rate(dd->clk, speed);
	if (rate < 0) {
		dev_err(dd->dev,
		"%s: no match found for requested clock frequency:%d",
			__func__, speed);
		return;
	}

	rc = clk_set_rate(dd->clk, rate);
	if (!rc)
		dd->clock_speed = rate;
}

static void msm_spi_clk_path_vote(struct msm_spi *dd, u32 rate)
{
	if (dd->bus_cl_hdl) {
		u64 ib = rate * dd->pdata->bus_width;

		msm_bus_scale_update_bw(dd->bus_cl_hdl, 0, ib);
	}
}

static void msm_spi_clk_path_teardown(struct msm_spi *dd)
{
	msm_spi_clk_path_vote(dd, 0);

	if (dd->bus_cl_hdl) {
		msm_bus_scale_unregister(dd->bus_cl_hdl);
		dd->bus_cl_hdl = NULL;
	}
}

/**
 * msm_spi_clk_path_postponed_register: register with bus-scaling after it
 * has been probed
 *
 * @return: zero on success
 *
 * Workaround: the SPI driver may be probed before the bus scaling driver.
 * Calling msm_bus_scale_register_client() fails if the bus scaling driver
 * is not ready yet, so this function must be called from a later context
 * rather than from probe. It may also be called more than once before
 * registration succeeds, in which case only one error message is logged.
 * At boot time all clocks are on, so earlier SPI transactions should
 * still succeed.
 */
static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
{
	int ret = 0;

	dd->bus_cl_hdl = msm_bus_scale_register(dd->pdata->master_id,
						MSM_BUS_SLAVE_EBI_CH0,
						(char *)dev_name(dd->dev),
						false);
	if (IS_ERR_OR_NULL(dd->bus_cl_hdl)) {
		ret = (dd->bus_cl_hdl ? PTR_ERR(dd->bus_cl_hdl) : -EAGAIN);
		dev_err(dd->dev, "Failed bus registration Err %d", ret);
	}
	return ret;
}

static void msm_spi_clk_path_init(struct msm_spi *dd)
{
	/*
	 * bail out if path voting is disabled (master_id == 0) or if it is
	 * already registered (client_hdl != 0)
	 */
	if (!dd->pdata->master_id || dd->bus_cl_hdl)
		return;

	/* on failure try again later */
	if (msm_spi_clk_path_postponed_register(dd))
		return;
}

static int msm_spi_calculate_size(int *fifo_size,
				int *block_size,
				int block,
				int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

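/*
 * Example decode (follows directly from the switches above): with
 * block = 1 and mult = 2, a block is 4 words (16 bytes) and the FIFO
 * holds 4 * 8 = 32 words, so *fifo_size = 32 and *block_size = 16.
 */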
static void msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}

	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		/* DM mode is not available for this block size */
		if (dd->input_block_size == 4 || dd->output_block_size == 4)
			dd->use_dma = 0;

		if (dd->use_dma) {
			dd->input_burst_size = max(dd->input_block_size,
						DM_BURST_SIZE);
			dd->output_burst_size = max(dd->output_block_size,
						DM_BURST_SIZE);
		}
	}
	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
}

static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;
	int read_bytes = (dd->pack_words ?
				SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < read_bytes) &&
			dd->rx_bytes_remaining; i++) {
			/*
			 * The data format depends on bytes_per_word:
			 * 4 bytes: 0x12345678
			 * 3 bytes: 0x00123456
			 * 2 bytes: 0x00001234
			 * 1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE * i;
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= read_bytes)
			dd->rx_bytes_remaining -= read_bytes;
		else
			dd->rx_bytes_remaining = 0;
	}
	dd->read_xfr_cnt++;
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

static inline void msm_spi_udelay(unsigned int delay_usecs)
{
	/*
	 * For smaller values of delay, context switch time
	 * would negate the usage of usleep
	 */
	if (delay_usecs > 20)
		usleep_range(delay_usecs, delay_usecs + 1);
	else if (delay_usecs)
		udelay(delay_usecs);
}

static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned int delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
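	/*
	 * i.e. the polling interval is ten SPI clock periods expressed in
	 * microseconds: at 10 MHz that is 1 us, at 100 kHz it is 100 us.
	 * Small results are clamped to SPI_DELAY_THRESHOLD below (the
	 * constant is defined in the driver header, spi_qsd.h).
	 */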
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				dev_err(dd->dev, "Invalid SPI operational state\n");
				return -ETIMEDOUT;
			} else
				return 0;
		}
		msm_spi_udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
				enum msm_spi_state state)
{
	enum msm_spi_state cur_state;

	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/*
	 * Per spec:
	 * For PAUSE_STATE to RESET_STATE, two writes of (10) are required
	 */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

/**
 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
 */
static inline void
msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if (dd->tx_mode == SPI_BAM_MODE) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

/**
 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values
 * that should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 * @return: calculated value for SPI_CONFIG
 */
static u32
msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
{
	if (mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;

	if (mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;

	return spi_config;
}

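/*
 * Note on the CPHA branch above: with CPHA = 0 (SPI modes 0 and 2) data
 * is sampled on the leading clock edge, before the first output shift,
 * hence INPUT_FIRST is set; with CPHA = 1 (modes 1 and 3) output is
 * shifted first and the flag is cleared.
 */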
/**
 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
 * next transfer
 */
static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
					spi_config, dd->spi->mode);

	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
		/* flags removed from SPI_CONFIG in QUP version-2 */
		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw - 1);

	/*
	 * HS_MODE improves signal stability for spi-clk high rates
	 * but is invalid in LOOPBACK mode.
	 */
	if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
			!(dd->spi->mode & SPI_LOOP))
		spi_config |= SPI_CFG_HS_MODE;
	else
		spi_config &= ~SPI_CFG_HS_MODE;

	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
}

/**
 * msm_spi_set_mx_counts: set the read/write counts for FIFO mode, and
 * SPI_MX_INPUT_COUNT/SPI_MX_OUTPUT_COUNT for BAM and DMOV modes.
 * @n_words: the number of reads/writes of size N.
 */
static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
{
	/*
	 * For FIFO mode:
	 *   - Set the MX_OUTPUT_COUNT/MX_INPUT_COUNT registers to 0
	 *   - Set the READ/WRITE_COUNT registers to 0 (infinite mode),
	 *     or to the number of bytes (finite mode) if less than a
	 *     FIFO's worth of data.
	 * For Block mode:
	 *   - Set the MX_OUTPUT/MX_INPUT_COUNT registers to the number
	 *     of transfer bytes.
	 *   - Set the READ/WRITE_COUNT registers to 0.
	 */
	if (dd->tx_mode != SPI_BAM_MODE) {
		if (dd->tx_mode == SPI_FIFO_MODE) {
			if (n_words <= dd->input_fifo_size)
				msm_spi_set_write_count(dd, n_words);
			else
				msm_spi_set_write_count(dd, 0);
			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		} else
			writel_relaxed(n_words, dd->base + SPI_MX_OUTPUT_COUNT);

		if (dd->rx_mode == SPI_FIFO_MODE) {
			if (n_words <= dd->input_fifo_size)
				writel_relaxed(n_words,
						dd->base + SPI_MX_READ_COUNT);
			else
				writel_relaxed(0,
						dd->base + SPI_MX_READ_COUNT);
			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
		} else
			writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT);
	} else {
		/* must be zero for BAM and DMOV */
		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
		msm_spi_set_write_count(dd, 0);

		/*
		 * For DMA transfers, both QUP_MX_INPUT_COUNT and
		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one:
		 * a non-balanced transfer where there is only a read_buf.
		 */
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			if (dd->write_buf)
				writel_relaxed(0,
						dd->base + SPI_MX_INPUT_COUNT);
			else
				writel_relaxed(n_words,
						dd->base + SPI_MX_INPUT_COUNT);

			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	}
}

static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
					struct msm_spi_bam_pipe *pipe)
{
	int ret = sps_disconnect(pipe->handle);

	if (ret) {
		dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
			__func__, pipe->name);
		return ret;
	}
	return 0;
}

static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
		struct msm_spi_bam_pipe *pipe, struct sps_connect *config)
{
	int ret;
	struct sps_register_event event = {
		.mode = SPS_TRIGGER_WAIT,
		.options = SPS_O_EOT,
	};

	if (pipe == &dd->bam.prod)
		event.xfer_done = &dd->rx_transfer_complete;
	else if (pipe == &dd->bam.cons)
		event.xfer_done = &dd->tx_transfer_complete;

	ret = sps_connect(pipe->handle, config);
	if (ret) {
		dev_err(dd->dev, "%s: sps_connect(%s:0x%pK):%d",
			__func__, pipe->name, pipe->handle, ret);
		return ret;
	}

	ret = sps_register_event(pipe->handle, &event);
	if (ret) {
		dev_err(dd->dev, "%s sps_register_event(hndl:0x%pK %s):%d",
			__func__, pipe->handle, pipe->name, ret);
		msm_spi_bam_pipe_disconnect(dd, pipe);
		return ret;
	}

	pipe->teardown_required = true;
	return 0;
}

static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
					enum msm_spi_pipe_direction pipe_dir)
{
	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
					(&dd->bam.prod) : (&dd->bam.cons);
	struct sps_connect config = pipe->config;
	int ret;

	ret = msm_spi_bam_pipe_disconnect(dd, pipe);
	if (ret)
		return;

	ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
	if (ret)
		return;
}

static void msm_spi_bam_flush(struct msm_spi *dd)
{
	dev_dbg(dd->dev, "%s flushing bam for recovery\n", __func__);

	msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
	msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
}

static int
msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
{
	int ret = 0;
	u32 data_xfr_size = 0, rem_bc = 0;
	u32 prod_flags = 0;

	rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd;
	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;

	/*
	 * set flags for last descriptor only
	 */
	if ((desc_cnt == 1)
		|| (*bytes_to_send == data_xfr_size))
		prod_flags = (dd->write_buf)
			? 0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);

	/*
	 * enqueue read buffer in BAM
	 */
	ret = sps_transfer_one(dd->bam.prod.handle,
			dd->cur_rx_transfer->rx_dma
				+ dd->bam.curr_rx_bytes_recvd,
			data_xfr_size, dd, prod_flags);
	if (ret < 0) {
		dev_err(dd->dev,
			"%s: Failed to queue producer BAM transfer",
			__func__);
		return ret;
	}

	dd->bam.curr_rx_bytes_recvd += data_xfr_size;
	*bytes_to_send -= data_xfr_size;
	dd->bam.bam_rx_len -= data_xfr_size;
	return data_xfr_size;
}

static int
msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
{
	int ret = 0;
	u32 data_xfr_size = 0, rem_bc = 0;
	u32 cons_flags = 0;

	rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent;
	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;

	/*
	 * set flags for last descriptor only
	 */
	if ((desc_cnt == 1)
		|| (*bytes_to_send == data_xfr_size))
		cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;

	/*
	 * enqueue write buffer in BAM
	 */
	ret = sps_transfer_one(dd->bam.cons.handle,
			dd->cur_tx_transfer->tx_dma
				+ dd->bam.curr_tx_bytes_sent,
			data_xfr_size, dd, cons_flags);
	if (ret < 0) {
		dev_err(dd->dev,
			"%s: Failed to queue consumer BAM transfer",
			__func__);
		return ret;
	}

	dd->bam.curr_tx_bytes_sent += data_xfr_size;
	*bytes_to_send -= data_xfr_size;
	dd->bam.bam_tx_len -= data_xfr_size;
	return data_xfr_size;
}

/**
 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
 * using BAM.
 * @brief BAM can move at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
 * transfer, and the QUP must pass through the reset state between
 * transfers, so the caller loops, issuing one BAM transfer at a time.
 * @return: zero on success
 */
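/*
 * SPI_MAX_TRFR_BTWN_RESETS is defined in spi_qsd.h; assuming a cap on
 * the order of 64 KB, a 200 KB message would be carved into four such
 * chunks, with a QUP reset between consecutive ones (see
 * msm_spi_bam_next_transfer below).
 */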
static int
msm_spi_bam_begin_transfer(struct msm_spi *dd)
{
	u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0;
	u32 n_words_xfr;
	s32 ret = 0;
	u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
	u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
	u32 byte_count = 0;

	rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len,
				SPI_MAX_TRFR_BTWN_RESETS);
	tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len,
				SPI_MAX_TRFR_BTWN_RESETS);
	n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv,
				dd->bytes_per_word);

	msm_spi_set_mx_counts(dd, n_words_xfr);

	ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
	if (ret < 0) {
		dev_err(dd->dev,
			"%s: Failed to set QUP state to run",
			__func__);
		goto xfr_err;
	}

	while ((rx_bytes_to_recv + tx_bytes_to_send) &&
		((cons_desc_cnt + prod_desc_cnt) > 0)) {
		struct spi_transfer *t = NULL;

		if (dd->read_buf && (prod_desc_cnt > 0)) {
			ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv,
							prod_desc_cnt);
			if (ret < 0)
				goto xfr_err;

			if (!(dd->cur_rx_transfer->len
				- dd->bam.curr_rx_bytes_recvd))
				t = dd->cur_rx_transfer;
			prod_desc_cnt--;
		}

		if (dd->write_buf && (cons_desc_cnt > 0)) {
			ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send,
							cons_desc_cnt);
			if (ret < 0)
				goto xfr_err;

			if (!(dd->cur_tx_transfer->len
				- dd->bam.curr_tx_bytes_sent))
				t = dd->cur_tx_transfer;
			cons_desc_cnt--;
		}
		byte_count += ret;
	}

	dd->tx_bytes_remaining -= min_t(u32, byte_count,
					SPI_MAX_TRFR_BTWN_RESETS);
	return 0;

xfr_err:
	return ret;
}

static int
msm_spi_bam_next_transfer(struct msm_spi *dd)
{
	if (dd->tx_mode != SPI_BAM_MODE)
		return 0;

	if (dd->tx_bytes_remaining > 0) {
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
				__func__);
			return 0;
		}
		return 1;
	}
	return 0;
}

static int msm_spi_dma_send_next(struct msm_spi *dd)
{
	int ret = 0;

	if (dd->tx_mode == SPI_BAM_MODE)
		ret = msm_spi_bam_next_transfer(dd);
	return ret;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
			SPI_OP_MAX_OUTPUT_DONE_FLAG,
			dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which IRQ occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (pm_runtime_suspended(dd->dev)) {
		dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
		return ret;
	}
	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
		readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);

		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	writel_relaxed(op, dd->base + SPI_OPERATIONAL);
	/*
	 * Ensure service flag was cleared before further
	 * processing of interrupt.
	 */
	mb();
	if (op & SPI_OP_INPUT_SERVICE_FLAG)
		ret |= msm_spi_input_irq(irq, dev_id);

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG)
		ret |= msm_spi_output_irq(irq, dev_id);

	if (dd->tx_mode != SPI_BAM_MODE) {
		if (!dd->rx_done) {
			if (dd->rx_bytes_remaining == 0)
				dd->rx_done = true;
		}
		if (!dd->tx_done) {
			if (!dd->tx_bytes_remaining &&
				(op & SPI_OP_IP_FIFO_NOT_EMPTY)) {
				dd->tx_done = true;
			}
		}
	}
	if (dd->tx_done && dd->rx_done) {
		msm_spi_set_state(dd, SPI_OP_STATE_RESET);
		dd->tx_done = false;
		dd->rx_done = false;
		complete(&dd->rx_transfer_complete);
		complete(&dd->tx_transfer_complete);
	}
	return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->rx_mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->rx_mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
	} else if (dd->rx_mode == SPI_BLOCK_MODE) {
		int count = 0;

		while (dd->rx_bytes_remaining &&
				(count < dd->input_block_size)) {
			msm_spi_read_word_from_fifo(dd);
			count += SPI_MAX_BYTES_PER_WORD;
		}
	}
	return IRQ_HANDLED;
}

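/*
 * msm_spi_write_word_to_fifo mirrors msm_spi_read_word_from_fifo: bytes
 * are packed into the FIFO word starting at the least-significant byte.
 * For instance, with a 2-byte SPI word, buffer bytes 0x12 then 0x34
 * become the FIFO word 0x00003412 — the same layout the read side
 * decodes above.
 */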
static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;
	int write_bytes =
		(dd->pack_words ? SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < write_bytes) &&
			dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * i));
		}
	} else
		if (dd->tx_bytes_remaining > write_bytes)
			dd->tx_bytes_remaining -= write_bytes;
		else
			dd->tx_bytes_remaining = 0;

	dd->write_xfr_cnt++;

	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	if (dd->tx_mode == SPI_FIFO_MODE) {
		while ((dd->tx_bytes_remaining > 0) &&
			(count < dd->input_fifo_size) &&
			!(readl_relaxed(dd->base + SPI_OPERATIONAL)
				& SPI_OP_OUTPUT_FIFO_FULL)) {
			msm_spi_write_word_to_fifo(dd);
			count++;
		}
	}

	if (dd->tx_mode == SPI_BLOCK_MODE) {
		while (dd->tx_bytes_remaining &&
				(count < dd->output_block_size)) {
			msm_spi_write_word_to_fifo(dd);
			count += SPI_MAX_BYTES_PER_WORD;
		}
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->tx_mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if ((dd->tx_mode == SPI_FIFO_MODE) || (dd->tx_mode == SPI_BLOCK_MODE))
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

static int msm_spi_bam_map_buffers(struct msm_spi *dd)
{
	int ret = -EINVAL;
	struct device *dev;
	struct spi_transfer *xfr;
	void *tx_buf, *rx_buf;
	u32 tx_len, rx_len;

	dev = dd->dev;
	xfr = dd->cur_transfer;

	tx_buf = (void *)xfr->tx_buf;
	rx_buf = xfr->rx_buf;
	tx_len = rx_len = xfr->len;
	if (tx_buf != NULL) {
		xfr->tx_dma = dma_map_single(dev, tx_buf,
						tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfr->tx_dma)) {
			ret = -ENOMEM;
			goto error;
		}
	}

	if (rx_buf != NULL) {
		xfr->rx_dma = dma_map_single(dev, rx_buf, rx_len,
						DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfr->rx_dma)) {
			if (tx_buf != NULL)
				dma_unmap_single(dev,
						xfr->tx_dma,
						tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
	}

	return 0;
error:
	msm_spi_dma_unmap_buffers(dd);
	return ret;
}

static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
	int ret = 0;

	if (dd->tx_mode == SPI_BAM_MODE)
		ret = msm_spi_bam_map_buffers(dd);
	return ret;
}

static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *xfr;
	void *tx_buf, *rx_buf;
	u32 tx_len, rx_len;

	dev = dd->dev;
	xfr = dd->cur_transfer;

	tx_buf = (void *)xfr->tx_buf;
	rx_buf = xfr->rx_buf;
	tx_len = rx_len = xfr->len;
	if (tx_buf != NULL)
		dma_unmap_single(dev, xfr->tx_dma,
				tx_len, DMA_TO_DEVICE);

	if (rx_buf != NULL)
		dma_unmap_single(dev, xfr->rx_dma,
				rx_len, DMA_FROM_DEVICE);
}

static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
{
	if (dd->tx_mode == SPI_BAM_MODE)
		msm_spi_bam_unmap_buffers(dd);
}

/**
 * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
 * the given transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DMA if:
 * 1. It is supported by the HW
 * 2. It is not disabled by platform data
 * 3. The transfer size is at least 3 * block size
 * 4. The buffers are aligned to the cache line
 * 5. Bits-per-word is 8, 16 or 32
 */
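/*
 * Example (hypothetical numbers): with a 16-byte input block size, a
 * 64-byte transfer of cache-line-aligned buffers at 8 bits-per-word
 * passes every test below (64 >= 3 * 16), while a 32-byte transfer
 * falls back to FIFO/block mode.
 */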
static inline bool
msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
{
	if (!dd->use_dma)
		return false;

	/* check constraints from platform data */
	if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
		return false;

	if (dd->cur_msg_len < 3 * dd->input_block_size)
		return false;

	if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) &&
		!dd->read_len && !dd->write_len)
		return false;

	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		u32 cache_line = dma_get_cache_alignment();

		if (tr->tx_buf) {
			if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
				return false;
		}
		if (tr->rx_buf) {
			if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
				return false;
		}

		if (tr->cs_change &&
			((bpw != 8) && (bpw != 16) && (bpw != 32)))
			return false;
	}
	return true;
}

/**
 * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
 * prepares to process a transfer.
 */
static void
msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
{
	if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
		dd->tx_mode = SPI_BAM_MODE;
		dd->rx_mode = SPI_BAM_MODE;
	} else {
		dd->rx_mode = SPI_FIFO_MODE;
		dd->tx_mode = SPI_FIFO_MODE;
		dd->read_len = dd->cur_transfer->len;
		dd->write_len = dd->cur_transfer->len;
	}
}

/**
 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
 * transfer
 */
static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
{
	u32 spi_iom;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	/* Set input and output transfer mode: FIFO, DMOV, or BAM */
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->tx_mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->rx_mode << INPUT_MODE_SHIFT));

	/*
	 * Always enable packing for BAM mode; for non-BAM mode, enable it
	 * only if bpw is a multiple of 8 and the transfer length is a
	 * multiple of 4 bytes.
	 */
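	/*
	 * e.g. a 100-byte transfer at 8 bits-per-word packs four bytes per
	 * FIFO word (100 % 4 == 0), whereas a 9-bits-per-word transfer is
	 * shifted out one SPI word per FIFO word with packing disabled.
	 */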
	if (dd->tx_mode == SPI_BAM_MODE ||
		((dd->cur_msg_len % SPI_MAX_BYTES_PER_WORD == 0) &&
		(dd->cur_transfer->bits_per_word) &&
		(dd->cur_transfer->bits_per_word <= 32) &&
		(dd->cur_transfer->bits_per_word % 8 == 0))) {
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
		dd->pack_words = true;
	} else {
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
		spi_iom |= SPI_IO_M_OUTPUT_BIT_SHIFT_EN;
		dd->pack_words = false;
	}

	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
}

static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
{
	if (mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	return spi_ioc;
}

/**
 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
 * next transfer
 * @return: the new set value of SPI_IO_CONTROL
 */
static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
{
	u32 spi_ioc, spi_ioc_orig, chip_select;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc,
						dd->spi->mode);
	/* Set chip-select */
	chip_select = dd->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	/*
	 * Ensure that the IO control mode register gets written
	 * before proceeding with the transfer.
	 */
	mb();
	return spi_ioc;
}

/**
 * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
 * the next transfer
 */
static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
{
	/*
	 * Mask the INPUT and OUTPUT service flags to prevent IRQs on FIFO
	 * status change in BAM mode.
	 */
	u32 mask = (dd->tx_mode == SPI_BAM_MODE) ?
		QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
		: 0;

	writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
}

static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *xfer = dd->cur_transfer;

	dd->cur_msg_len = 0;
	dd->read_len = dd->write_len = 0;
	dd->bam.bam_tx_len = dd->bam.bam_rx_len = 0;

	if (xfer->tx_buf)
		dd->bam.bam_tx_len = dd->write_len = xfer->len;
	if (xfer->rx_buf)
		dd->bam.bam_rx_len = dd->read_len = xfer->len;
	dd->cur_msg_len = xfer->len;
}

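/*
 * msm_spi_process_transfer: programming sequence for one transfer, as
 * implemented below: put the QUP in RESET, pick FIFO/BLOCK vs. BAM mode,
 * program the count/config/IO-mode/IO-control/IRQ-mask registers, prime
 * the output FIFO (or queue BAM descriptors), move to RUN, then wait on
 * the tx/rx completions with a timeout scaled to the transfer length.
 */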
static int msm_spi_process_transfer(struct msm_spi *dd)
{
	u8 bpw;
	u32 max_speed;
	u32 read_count;
	u32 timeout;
	u32 spi_ioc;
	u32 int_loopback = 0;
	int ret;
	int status = 0;

	get_transfer_length(dd);
	dd->cur_tx_transfer = dd->cur_transfer;
	dd->cur_rx_transfer = dd->cur_transfer;
	dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0;
	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	dd->tx_done = false;
	dd->rx_done = false;
	init_completion(&dd->tx_transfer_complete);
	init_completion(&dd->rx_transfer_complete);

	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->spi->mode & SPI_LOOP)
		int_loopback = 1;

	ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	if (ret < 0) {
		dev_err(dd->dev,
			"%s: Error setting QUP to reset-state",
			__func__);
		return ret;
	}

	msm_spi_set_transfer_mode(dd, bpw, read_count);
	msm_spi_set_mx_counts(dd, read_count);
	if (dd->tx_mode == SPI_BAM_MODE) {
		ret = msm_spi_dma_map_buffers(dd);
		if (ret < 0) {
			pr_err("%s: Error mapping DMA buffers\n", __func__);
			dd->tx_mode = SPI_MODE_NONE;
			dd->rx_mode = SPI_MODE_NONE;
			return ret;
		}
	}
	msm_spi_set_qup_io_modes(dd);
	msm_spi_set_spi_config(dd, bpw);
	msm_spi_set_qup_config(dd, bpw);
	spi_ioc = msm_spi_set_spi_io_control(dd);
	msm_spi_set_qup_op_mask(dd);

	/*
	 * The output FIFO interrupt handler will handle all writes after
	 * the first. Restricting this to one write avoids contention
	 * issues and race conditions between this thread and the int handler.
	 */
	if (dd->tx_mode != SPI_BAM_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	} else {
		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
				__func__);
			status = -EIO;
			goto transfer_end;
		}
	}

	/*
	 * In BAM mode the QUP is already in the RUN state at this point.
	 * Otherwise, only enter the RUN state after the first word is
	 * written into the output FIFO; the output FIFO EMPTY interrupt
	 * might otherwise fire before the first word is written, resulting
	 * in a possible race condition.
	 */
	if (dd->tx_mode != SPI_BAM_MODE)
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
			dev_warn(dd->dev,
				"%s: Failed to set QUP to run-state. Mode:%d",
				__func__, dd->tx_mode);
			goto transfer_end;
		}

	/* Assume success, this might change later upon transaction result */
	do {
		if (dd->write_buf &&
			!wait_for_completion_timeout(&dd->tx_transfer_complete,
							timeout)) {
			dev_err(dd->dev, "%s: SPI Tx transaction timeout\n",
				__func__);
			status = -EIO;
			break;
		}

		if (dd->read_buf &&
			!wait_for_completion_timeout(&dd->rx_transfer_complete,
							timeout)) {
			dev_err(dd->dev, "%s: SPI Rx transaction timeout\n",
				__func__);
			status = -EIO;
			break;
		}
	} while (msm_spi_dma_send_next(dd));

	msm_spi_udelay(dd->xfrs_delay_usec);

transfer_end:
	if ((dd->tx_mode == SPI_BAM_MODE) && status)
		msm_spi_bam_flush(dd);
	msm_spi_dma_unmap_buffers(dd);
	dd->tx_mode = SPI_MODE_NONE;
	dd->rx_mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	if (!dd->cur_transfer->cs_change)
		writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
				dd->base + SPI_IO_CONTROL);
	return status;
}

static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
{
	struct msm_spi *dd = spi_master_get_devdata(spi->master);
	u32 spi_ioc;
	u32 spi_ioc_orig;
	int rc = 0;

	rc = pm_runtime_get_sync(dd->dev);
	if (rc < 0) {
		dev_err(dd->dev, "Failure during runtime get,rc=%d", rc);
		return;
	}

	if (dd->pdata->is_shared) {
		rc = get_local_resources(dd);
		if (rc)
			return;
	}

	msm_spi_clk_path_vote(dd, spi->max_speed_hz);

	if (!(spi->mode & SPI_CS_HIGH))
		set_flag = !set_flag;

	/* Serve only under mutex lock as RT suspend may cause a race */
	mutex_lock(&dd->core_lock);
	if (dd->suspended) {
		dev_err(dd->dev, "%s: SPI operational state=%d Invalid\n",
			__func__, dd->suspended);
		mutex_unlock(&dd->core_lock);
		return;
	}

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (set_flag)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
	if (dd->pdata->is_shared)
		put_local_resources(dd);
	mutex_unlock(&dd->core_lock);

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
}

static void reset_core(struct msm_spi *dd)
{
	u32 spi_ioc;

	msm_spi_register_init(dd);
	/*
	 * The SPI core generates a bogus input overrun error on some targets,
	 * when a transition from run to reset state occurs and if the FIFO has
	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
	 * bit.
	 */
	msm_spi_enable_error_flags(dd);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc |= SPI_IO_C_NO_TRI_STATE;
	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
	/* Ensure that the IO control write completed before returning */
	mb();
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
}
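
/*
 * put_local_resources: counterpart of get_local_resources(); disables
 * IRQs and clocks and releases the GPIOs so another execution environment
 * sharing this QUP can safely use the hardware.
 */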
static void put_local_resources(struct msm_spi *dd)
{
	if (IS_ERR_OR_NULL(dd->clk) || IS_ERR_OR_NULL(dd->pclk)) {
		dev_err(dd->dev, "%s: error on clk put\n", __func__);
		return;
	}

	msm_spi_disable_irqs(dd);
	clk_disable_unprepare(dd->clk);
	dd->clock_speed = 0;
	clk_disable_unprepare(dd->pclk);

	/* Free the spi clk, miso, mosi, cs gpio */
	if (dd->pdata && dd->pdata->gpio_release)
		dd->pdata->gpio_release();

	msm_spi_free_gpios(dd);
}
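
/*
 * get_local_resources: acquire the GPIOs and enable the core/iface clocks
 * and IRQs before touching the hardware; undone by put_local_resources().
 */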
static int get_local_resources(struct msm_spi *dd)
{
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(dd->clk) || IS_ERR_OR_NULL(dd->pclk)) {
		dev_err(dd->dev, "%s: error on clk get\n", __func__);
		return ret;
	}

	/* Configure the spi clk, miso, mosi and cs gpio */
	if (dd->pdata->gpio_config) {
		ret = dd->pdata->gpio_config();
		if (ret) {
			dev_err(dd->dev, "%s: error configuring GPIOs\n",
				__func__);
			return ret;
		}
	}

	ret = msm_spi_request_gpios(dd);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dd->clk);
	if (ret)
		goto clk0_err;
	ret = clk_prepare_enable(dd->pclk);
	if (ret)
		goto clk1_err;
	msm_spi_enable_irqs(dd);

	return 0;

clk1_err:
	clk_disable_unprepare(dd->clk);
clk0_err:
	msm_spi_free_gpios(dd);
	return ret;
}
/**
 * msm_spi_transfer_one: process one spi transfer at a time
 * @master: spi master controller reference
 * @spi: client device this transfer is addressed to
 * @xfer: one segment of the current SPI message
 *
 * Return: zero on success or a negative error value
 */
static int msm_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct msm_spi *dd;
	unsigned long flags;
	int status_error = 0;

	dd = spi_master_get_devdata(master);

	/* Check message parameters */
	if (xfer->speed_hz > dd->pdata->max_clock_speed ||
	    (xfer->bits_per_word &&
	     (xfer->bits_per_word < 4 || xfer->bits_per_word > 32)) ||
	    (xfer->tx_buf == NULL && xfer->rx_buf == NULL)) {
		dev_err(dd->dev,
			"Invalid transfer: %d Hz, %d bpw tx=%pK, rx=%pK\n",
			xfer->speed_hz, xfer->bits_per_word,
			xfer->tx_buf, xfer->rx_buf);
		return -EINVAL;
	}
	dd->spi = spi;
	dd->cur_transfer = xfer;

	mutex_lock(&dd->core_lock);

	spin_lock_irqsave(&dd->queue_lock, flags);
	dd->transfer_pending = 1;
	spin_unlock_irqrestore(&dd->queue_lock, flags);
	/*
	 * get local resources for each transfer to ensure we're in a good
	 * state and not interfering with other EE's using this device
	 */
	if (dd->pdata->is_shared) {
		if (get_local_resources(dd)) {
			mutex_unlock(&dd->core_lock);
			spi_finalize_current_message(master);
			return -EINVAL;
		}

		reset_core(dd);
		if (dd->use_dma) {
			msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
					&dd->bam.prod.config);
			msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
					&dd->bam.cons.config);
		}
	}

	if (dd->suspended || !msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		/* must be a negative errno; a positive return would tell
		 * the spi core the transfer is still in flight
		 */
		status_error = -EIO;
	}

	if (!status_error)
		status_error = msm_spi_process_transfer(dd);

	spin_lock_irqsave(&dd->queue_lock, flags);
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	/*
	 * Put local resources prior to calling finalize to ensure the hw
	 * is in a known state before notifying the calling thread (which is a
	 * different context since we're running in the spi kthread here) to
	 * prevent race conditions between us and any other EE's using this hw.
	 */
	if (dd->pdata->is_shared) {
		if (dd->use_dma) {
			msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
			msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
		}
		put_local_resources(dd);
	}
	mutex_unlock(&dd->core_lock);
	if (dd->suspended)
		wake_up_interruptible(&dd->continue_suspend);
	return status_error;
}
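
/*
 * msm_spi_prepare_transfer_hardware: spi core callback invoked before a
 * message is handled; brings the device to active state via runtime-PM,
 * or via an explicit resume when runtime-PM is disabled.
 */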
static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
{
	struct msm_spi *dd = spi_master_get_devdata(master);
	int resume_state = 0;

	resume_state = pm_runtime_get_sync(dd->dev);
	if (resume_state < 0) {
		/* pm_runtime_get_sync() raised the count even on failure */
		pm_runtime_put_noidle(dd->dev);
		goto spi_finalize;
	}
	/*
	 * Counter-part of system-suspend when runtime-pm is not enabled.
	 * This way, resume can be left empty and device will be put in
	 * active mode only if client requests anything on the bus.
	 */
	if (!pm_runtime_enabled(dd->dev))
		resume_state = msm_spi_pm_resume_runtime(dd->dev);
	if (resume_state < 0)
		goto spi_finalize;
	if (dd->suspended) {
		resume_state = -EBUSY;
		goto spi_finalize;
	}
	return 0;

spi_finalize:
	spi_finalize_current_message(master);
	return resume_state;
}
static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
{
	struct msm_spi *dd = spi_master_get_devdata(master);

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
	return 0;
}
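
/*
 * msm_spi_setup: spi_master setup callback; validates bits-per-word and
 * chip-select, then programs CS polarity, clock polarity/phase and
 * loopback bits for this slave while the device is briefly powered up.
 */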
static int msm_spi_setup(struct spi_device *spi)
{
	struct msm_spi *dd;
	int rc = 0;
	u32 spi_ioc;
	u32 spi_config;
	u32 mask;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
			__func__, spi->bits_per_word);
		return -EINVAL;
	}
	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
		return -EINVAL;
	}

	dd = spi_master_get_devdata(spi->master);

	rc = pm_runtime_get_sync(dd->dev);
	if (rc < 0 && !dd->is_init_complete &&
	    pm_runtime_enabled(dd->dev)) {
		pm_runtime_set_suspended(dd->dev);
		pm_runtime_put_sync(dd->dev);
		rc = 0;
		goto err_setup_exit;
	} else {
		rc = 0;
	}

	mutex_lock(&dd->core_lock);

	/* Counter-part of system-suspend when runtime-pm is not enabled. */
	if (!pm_runtime_enabled(dd->dev)) {
		rc = msm_spi_pm_resume_runtime(dd->dev);
		if (rc < 0 && !dd->is_init_complete) {
			rc = 0;
			mutex_unlock(&dd->core_lock);
			goto err_setup_exit;
		}
	}

	if (dd->suspended) {
		rc = -EBUSY;
		mutex_unlock(&dd->core_lock);
		goto err_setup_exit;
	}

	if (dd->pdata->is_shared) {
		rc = get_local_resources(dd);
		if (rc)
			goto no_resources;
	}

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
	if (spi->mode & SPI_CS_HIGH)
		spi_ioc |= mask;
	else
		spi_ioc &= ~mask;
	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);

	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
						spi_config, spi->mode);
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);

	/* Ensure previous write completed before disabling the clocks */
	mb();
	if (dd->pdata->is_shared)
		put_local_resources(dd);
	/* Counter-part of system-resume when runtime-pm is not enabled. */
	if (!pm_runtime_enabled(dd->dev))
		msm_spi_pm_suspend_runtime(dd->dev);

no_resources:
	mutex_unlock(&dd->core_lock);
	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

err_setup_exit:
	return rc;
}
#ifdef CONFIG_DEBUG_FS

static int debugfs_iomem_x32_set(void *data, u64 val)
{
	struct msm_spi_debugfs_data *reg = (struct msm_spi_debugfs_data *)data;
	struct msm_spi *dd = reg->dd;
	int ret;

	ret = pm_runtime_get_sync(dd->dev);
	if (ret < 0) {
		/* pm_runtime_get_sync() raised the count even on failure */
		pm_runtime_put_noidle(dd->dev);
		return ret;
	}

	writel_relaxed(val, (dd->base + reg->offset));
	/* Ensure the previous write completed. */
	mb();

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
	return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	struct msm_spi_debugfs_data *reg = (struct msm_spi_debugfs_data *)data;
	struct msm_spi *dd = reg->dd;
	int ret;

	ret = pm_runtime_get_sync(dd->dev);
	if (ret < 0) {
		/* pm_runtime_get_sync() raised the count even on failure */
		pm_runtime_put_noidle(dd->dev);
		return ret;
	}

	*val = readl_relaxed(dd->base + reg->offset);
	/* Ensure the previous read completed. */
	mb();

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

static void spi_debugfs_init(struct msm_spi *dd)
{
	char dir_name[20];

	scnprintf(dir_name, sizeof(dir_name), "%s_dbg", dev_name(dd->dev));
	dd->dent_spi = debugfs_create_dir(dir_name, NULL);
	if (dd->dent_spi) {
		int i;

		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
			dd->reg_data[i].offset = debugfs_spi_regs[i].offset;
			dd->reg_data[i].dd = dd;
			dd->debugfs_spi_regs[i] = debugfs_create_file(
				debugfs_spi_regs[i].name,
				debugfs_spi_regs[i].mode,
				dd->dent_spi, &dd->reg_data[i],
				&fops_iomem_x32);
		}
	}
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
	if (dd->dent_spi) {
		int i;

		debugfs_remove_recursive(dd->dent_spi);
		dd->dent_spi = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
			dd->debugfs_spi_regs[i] = NULL;
	}
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif
/* ===Device attributes begin=== */
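/*
 * Reading the "stats" attribute dumps FIFO/DMA geometry and interrupt
 * counters; any write resets the counters. A usage sketch (the sysfs
 * path is illustrative only, it depends on the actual device name):
 *
 *	cat /sys/devices/.../f9923000.spi/stats
 *	echo 1 > /sys/devices/.../f9923000.spi/stats
 */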
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"rx fifo_size = %d spi words\n"
			"tx fifo_size = %d spi words\n"
			"use_dma ? %s\n"
			"rx block size = %d bytes\n"
			"tx block size = %d bytes\n"
			"input burst size = %d bytes\n"
			"output burst size = %d bytes\n"
			"DMA configuration:\n"
			"tx_ch=%d, rx_ch=%d, tx_crci=%d, rx_crci=%d\n"
			"--statistics--\n"
			"Rx isrs = %d\n"
			"Tx isrs = %d\n"
			"--debug--\n"
			"NA yet\n",
			dev_name(dev),
			dd->input_fifo_size,
			dd->output_fifo_size,
			dd->use_dma ? "yes" : "no",
			dd->input_block_size,
			dd->output_block_size,
			dd->input_burst_size,
			dd->output_burst_size,
			dd->tx_dma_chan,
			dd->rx_dma_chan,
			dd->tx_dma_crci,
			dd->rx_dma_crci,
			dd->stat_rx,
			dd->stat_tx);
}

/* Reset statistics on write */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	/* drvdata of this device is the spi_master, not the msm_spi */
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	dd->stat_rx = 0;
	dd->stat_tx = 0;
	return count;
}

static DEVICE_ATTR(stats, 0644, show_stats, set_stats);

static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	NULL,
};

static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* ===Device attributes end=== */
static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
				      enum msm_spi_pipe_direction pipe_dir)
{
	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
					(&dd->bam.prod) : (&dd->bam.cons);

	if (!pipe->teardown_required)
		return;

	msm_spi_bam_pipe_disconnect(dd, pipe);
	dma_free_coherent(dd->dev, pipe->config.desc.size,
			  pipe->config.desc.base, pipe->config.desc.phys_base);
	sps_free_endpoint(pipe->handle);
	pipe->handle = NULL;
	pipe->teardown_required = false;
}
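
/*
 * msm_spi_bam_pipe_init: allocate an sps endpoint and a coherent
 * descriptor FIFO for one BAM pipe, then fill in its sps_connect config
 * (pipe indices come from platform data). The pipe itself is connected
 * later, on runtime resume or per-transfer for shared controllers.
 */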
static int msm_spi_bam_pipe_init(struct msm_spi *dd,
				 enum msm_spi_pipe_direction pipe_dir)
{
	int rc = 0;
	struct sps_pipe *pipe_handle;
	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
					(&dd->bam.prod) : (&dd->bam.cons);
	struct sps_connect *pipe_conf = &pipe->config;

	pipe->name = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? "cons" : "prod";
	pipe->handle = NULL;
	pipe_handle = sps_alloc_endpoint();
	if (!pipe_handle) {
		dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n",
			__func__);
		return -ENOMEM;
	}

	memset(pipe_conf, 0, sizeof(*pipe_conf));
	rc = sps_get_config(pipe_handle, pipe_conf);
	if (rc) {
		dev_err(dd->dev, "%s: Failed to get BAM pipe config\n",
			__func__);
		goto config_err;
	}

	if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
		pipe_conf->source = dd->bam.handle;
		pipe_conf->destination = SPS_DEV_HANDLE_MEM;
		pipe_conf->mode = SPS_MODE_SRC;
		pipe_conf->src_pipe_index =
					dd->pdata->bam_producer_pipe_index;
		pipe_conf->dest_pipe_index = 0;
	} else {
		pipe_conf->source = SPS_DEV_HANDLE_MEM;
		pipe_conf->destination = dd->bam.handle;
		pipe_conf->mode = SPS_MODE_DEST;
		pipe_conf->src_pipe_index = 0;
		pipe_conf->dest_pipe_index =
					dd->pdata->bam_consumer_pipe_index;
	}
	pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
	pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
	pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
						  pipe_conf->desc.size,
						  &pipe_conf->desc.phys_base,
						  GFP_KERNEL);
	if (!pipe_conf->desc.base) {
		dev_err(dd->dev, "%s: Failed to allocate BAM pipe memory\n",
			__func__);
		rc = -ENOMEM;
		goto config_err;
	}

	/* zero descriptor FIFO for convenient debugging of first descs */
	memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);

	pipe->handle = pipe_handle;
	return 0;

config_err:
	sps_free_endpoint(pipe_handle);
	return rc;
}
static void msm_spi_bam_teardown(struct msm_spi *dd)
{
	msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
	msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);

	if (dd->bam.deregister_required) {
		sps_deregister_bam_device(dd->bam.handle);
		dd->bam.deregister_required = false;
	}
}
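
/*
 * msm_spi_bam_init: look up (or register) the BAM device backing this QUP
 * and initialize both of its pipes. Registration is only needed when no
 * other driver has registered this BAM yet, in which case teardown must
 * also deregister it.
 */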
static int msm_spi_bam_init(struct msm_spi *dd)
{
	struct sps_bam_props bam_props = {0};
	uintptr_t bam_handle;
	int rc = 0;

	rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
	if (rc || !bam_handle) {
		bam_props.phys_addr = dd->bam.phys_addr;
		bam_props.virt_addr = dd->bam.base;
		bam_props.irq = dd->bam.irq;
		bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
		bam_props.summing_threshold = 0x10;

		rc = sps_register_bam_device(&bam_props, &bam_handle);
		if (rc) {
			dev_err(dd->dev,
				"%s: Failed to register BAM device",
				__func__);
			return rc;
		}
		dd->bam.deregister_required = true;
	}

	dd->bam.handle = bam_handle;

	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
	if (rc) {
		dev_err(dd->dev,
			"%s: Failed to init producer BAM-pipe",
			__func__);
		goto bam_init_error;
	}

	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
	if (rc) {
		dev_err(dd->dev,
			"%s: Failed to init consumer BAM-pipe",
			__func__);
		goto bam_init_error;
	}

	return 0;

bam_init_error:
	msm_spi_bam_teardown(dd);
	return rc;
}
enum msm_spi_dt_entry_status {
	DT_REQ,		/* Required:  fail if missing */
	DT_SGST,	/* Suggested: warn if missing */
	DT_OPT,		/* Optional:  don't warn if missing */
};

enum msm_spi_dt_entry_type {
	DT_U32,
	DT_GPIO,
	DT_BOOL,
};

struct msm_spi_dt_to_pdata_map {
	const char			*dt_name;
	void				*ptr_data;
	enum msm_spi_dt_entry_status	status;
	enum msm_spi_dt_entry_type	type;
	int				default_val;
};
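
/*
 * msm_spi_dt_to_pdata_populate: walk a NULL-terminated map of DT
 * properties, parse each one according to its type and store the result
 * (or the default) through ptr_data. Only DT_REQ entries turn a parse
 * failure into a returned error.
 */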
static int msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
					struct msm_spi_platform_data *pdata,
					struct msm_spi_dt_to_pdata_map *itr)
{
	int ret, err = 0;
	struct device_node *node = pdev->dev.of_node;

	for (; itr->dt_name; ++itr) {
		switch (itr->type) {
		case DT_GPIO:
			ret = of_get_named_gpio(node, itr->dt_name, 0);
			if (ret >= 0) {
				*((int *) itr->ptr_data) = ret;
				ret = 0;
			}
			break;
		case DT_U32:
			ret = of_property_read_u32(node, itr->dt_name,
						   (u32 *) itr->ptr_data);
			break;
		case DT_BOOL:
			*((bool *) itr->ptr_data) =
				of_property_read_bool(node, itr->dt_name);
			ret = 0;
			break;
		default:
			dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
				itr->type);
			ret = -EBADE;
		}

		dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
			ret, itr->dt_name, *((int *)itr->ptr_data));

		if (ret) {
			*((int *)itr->ptr_data) = itr->default_val;

			if (itr->status < DT_OPT) {
				dev_err(&pdev->dev, "Missing '%s' DT entry\n",
					itr->dt_name);

				/* continue on error to report all missing entries */
				if (itr->status == DT_REQ && !err)
					err = ret;
			}
		}
	}

	return err;
}
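
/*
 * A hypothetical device-tree node consumed by the map below; the property
 * names match the table, the values are illustrative only:
 *
 *	spi@f9923000 {
 *		compatible = "qcom,spi-qup-v2";
 *		spi-max-frequency = <19200000>;
 *		qcom,use-bam;
 *		qcom,bam-consumer-pipe-index = <12>;
 *		qcom,bam-producer-pipe-index = <13>;
 *		qcom,shared;
 *	};
 */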
/**
 * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree
 */
static struct msm_spi_platform_data *msm_spi_dt_to_pdata(
			struct platform_device *pdev, struct msm_spi *dd)
{
	struct msm_spi_platform_data *pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	{
		/* scoped so the map lives on the stack only while parsing */
		struct msm_spi_dt_to_pdata_map map[] = {
		{"spi-max-frequency",
			&pdata->max_clock_speed,	 DT_SGST, DT_U32,  0},
		{"qcom,infinite-mode",
			&pdata->infinite_mode,		 DT_OPT,  DT_U32,  0},
		{"qcom,master-id",
			&pdata->master_id,		 DT_SGST, DT_U32,  0},
		{"qcom,bus-width",
			&pdata->bus_width,		 DT_OPT,  DT_U32,  8},
		{"qcom,ver-reg-exists",
			&pdata->ver_reg_exists,		 DT_OPT,  DT_BOOL, 0},
		{"qcom,use-bam",
			&pdata->use_bam,		 DT_OPT,  DT_BOOL, 0},
		{"qcom,use-pinctrl",
			&pdata->use_pinctrl,		 DT_OPT,  DT_BOOL, 0},
		{"qcom,bam-consumer-pipe-index",
			&pdata->bam_consumer_pipe_index, DT_OPT,  DT_U32,  0},
		{"qcom,bam-producer-pipe-index",
			&pdata->bam_producer_pipe_index, DT_OPT,  DT_U32,  0},
		{"qcom,gpio-clk",
			&dd->spi_gpios[0],		 DT_OPT,  DT_GPIO, -1},
		{"qcom,gpio-miso",
			&dd->spi_gpios[1],		 DT_OPT,  DT_GPIO, -1},
		{"qcom,gpio-mosi",
			&dd->spi_gpios[2],		 DT_OPT,  DT_GPIO, -1},
		{"qcom,gpio-cs0",
			&dd->cs_gpios[0].gpio_num,	 DT_OPT,  DT_GPIO, -1},
		{"qcom,gpio-cs1",
			&dd->cs_gpios[1].gpio_num,	 DT_OPT,  DT_GPIO, -1},
		{"qcom,gpio-cs2",
			&dd->cs_gpios[2].gpio_num,	 DT_OPT,  DT_GPIO, -1},
		{"qcom,gpio-cs3",
			&dd->cs_gpios[3].gpio_num,	 DT_OPT,  DT_GPIO, -1},
		{"qcom,rt-priority",
			&pdata->rt_priority,		 DT_OPT,  DT_BOOL, 0},
		{"qcom,shared",
			&pdata->is_shared,		 DT_OPT,  DT_BOOL, 0},
		{NULL, NULL, 0, 0, 0},
		};

		if (msm_spi_dt_to_pdata_populate(pdev, pdata, map)) {
			devm_kfree(&pdev->dev, pdata);
			return NULL;
		}
	}

	if (pdata->use_bam) {
		if (!pdata->bam_consumer_pipe_index) {
			dev_warn(&pdev->dev,
				"missing qcom,bam-consumer-pipe-index entry in device-tree\n");
			pdata->use_bam = false;
		}

		if (!pdata->bam_producer_pipe_index) {
			dev_warn(&pdev->dev,
				"missing qcom,bam-producer-pipe-index entry in device-tree\n");
			pdata->use_bam = false;
		}
	}
	return pdata;
}
static int msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
{
	u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);

	return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
						: SPI_QUP_VERSION_NONE;
}

static int msm_spi_bam_get_resources(struct msm_spi *dd,
		struct platform_device *pdev, struct spi_master *master)
{
	struct resource *resource;
	size_t bam_mem_size;

	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"spi_bam_physical");
	if (!resource) {
		dev_warn(&pdev->dev,
			 "%s: Missing spi_bam_physical entry in DT",
			 __func__);
		return -ENXIO;
	}

	dd->bam.phys_addr = resource->start;
	bam_mem_size = resource_size(resource);
	dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
				    bam_mem_size);
	if (!dd->bam.base) {
		dev_warn(&pdev->dev,
			 "%s: Failed to ioremap(spi_bam_physical)",
			 __func__);
		return -ENXIO;
	}

	dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
	if (dd->bam.irq < 0) {
		dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
			 __func__);
		return -EINVAL;
	}

	dd->dma_init = msm_spi_bam_init;
	dd->dma_teardown = msm_spi_bam_teardown;
	return 0;
}
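
/*
 * init_resources: one-time hardware bring-up performed on the first
 * runtime resume: pinctrl, clocks, optional GSBI and DMA setup, FIFO
 * sizing and IRQ registration. Clocks are left disabled on return; the
 * regular resume path re-enables them per use.
 */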
static int init_resources(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;
	int rc = -ENXIO;
	int clk_enabled = 0;
	int pclk_enabled = 0;

	dd = spi_master_get_devdata(master);

	if (dd->pdata && dd->pdata->use_pinctrl) {
		rc = msm_spi_pinctrl_init(dd);
		if (rc) {
			dev_err(&pdev->dev, "%s: pinctrl init failed\n",
				__func__);
			return rc;
		}
	}

	mutex_lock(&dd->core_lock);

	dd->clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dd->clk)) {
		dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
		rc = PTR_ERR(dd->clk);
		goto err_clk_get;
	}

	dd->pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(dd->pclk)) {
		dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
		rc = PTR_ERR(dd->pclk);
		goto err_pclk_get;
	}

	if (dd->pdata && dd->pdata->max_clock_speed)
		msm_spi_clock_set(dd, dd->pdata->max_clock_speed);

	rc = clk_prepare_enable(dd->clk);
	if (rc) {
		dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
			__func__);
		goto err_clk_enable;
	}
	clk_enabled = 1;

	rc = clk_prepare_enable(dd->pclk);
	if (rc) {
		dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
			__func__);
		goto err_pclk_enable;
	}
	pclk_enabled = 1;

	if (dd->pdata && dd->pdata->ver_reg_exists) {
		enum msm_spi_qup_version ver =
					msm_spi_get_qup_hw_ver(&pdev->dev, dd);
		if (dd->qup_ver != ver)
			dev_warn(&pdev->dev,
				 "%s: HW version different than initially assumed by probe",
				 __func__);
	}

	/* GSBI does not exist on B-family MSM chips */
	if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
		rc = msm_spi_configure_gsbi(dd, pdev);
		if (rc)
			goto err_config_gsbi;
	}

	msm_spi_calculate_fifo_size(dd);
	if (dd->use_dma) {
		rc = dd->dma_init(dd);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: failed to init DMA. Disabling DMA mode\n",
				__func__);
			dd->use_dma = 0;
		}
	}

	msm_spi_register_init(dd);
	/*
	 * The SPI core generates a bogus input overrun error on some targets,
	 * when a transition from run to reset state occurs and if the FIFO has
	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
	 * bit.
	 */
	msm_spi_enable_error_flags(dd);

	writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
	rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	if (rc)
		goto err_spi_state;

	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);
	clk_enabled = 0;
	pclk_enabled = 0;

	dd->transfer_pending = 0;
	dd->tx_mode = SPI_MODE_NONE;
	dd->rx_mode = SPI_MODE_NONE;

	rc = msm_spi_request_irq(dd, pdev, master);
	if (rc)
		goto err_irq;

	msm_spi_disable_irqs(dd);

	mutex_unlock(&dd->core_lock);
	return 0;

err_irq:
err_spi_state:
	if (dd->use_dma && dd->dma_teardown)
		dd->dma_teardown(dd);
err_config_gsbi:
	if (pclk_enabled)
		clk_disable_unprepare(dd->pclk);
err_pclk_enable:
	if (clk_enabled)
		clk_disable_unprepare(dd->clk);
err_clk_enable:
	clk_put(dd->pclk);
err_pclk_get:
	clk_put(dd->clk);
err_clk_get:
	mutex_unlock(&dd->core_lock);
	return rc;
}
static int msm_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct msm_spi *dd;
	struct resource *resource;
	int i = 0;
	int rc = -ENXIO;
	struct msm_spi_platform_data *pdata;

	master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
	if (!master) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "master allocation failed\n");
		goto err_probe_exit;
	}

	master->bus_num = pdev->id;
	master->mode_bits = SPI_SUPPORTED_MODES;
	master->num_chipselect = SPI_NUM_CHIPSELECTS;
	master->set_cs = msm_spi_set_cs;
	master->setup = msm_spi_setup;
	master->prepare_transfer_hardware = msm_spi_prepare_transfer_hardware;
	master->transfer_one = msm_spi_transfer_one;
	master->unprepare_transfer_hardware =
					msm_spi_unprepare_transfer_hardware;

	platform_set_drvdata(pdev, master);
	dd = spi_master_get_devdata(master);

	if (pdev->dev.of_node) {
		dd->qup_ver = SPI_QUP_VERSION_BFAM;
		master->dev.of_node = pdev->dev.of_node;
		pdata = msm_spi_dt_to_pdata(pdev, dd);
		if (!pdata) {
			dev_err(&pdev->dev,
				"platform data allocation failed\n");
			rc = -ENOMEM;
			goto err_probe_exit;
		}

		rc = of_alias_get_id(pdev->dev.of_node, "spi");
		if (rc < 0)
			dev_warn(&pdev->dev,
				 "using default bus_num %d\n", pdev->id);
		else
			master->bus_num = pdev->id = rc;
	} else {
		pdata = pdev->dev.platform_data;
		dd->qup_ver = SPI_QUP_VERSION_NONE;

		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
			resource = platform_get_resource(pdev, IORESOURCE_IO,
							 i);
			dd->spi_gpios[i] = resource ? resource->start : -1;
		}

		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
			resource = platform_get_resource(pdev, IORESOURCE_IO,
						i + ARRAY_SIZE(spi_rsrcs));
			dd->cs_gpios[i].gpio_num = resource ?
							resource->start : -1;
		}
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
		dd->cs_gpios[i].valid = 0;

	dd->pdata = pdata;
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		rc = -ENXIO;
		goto err_probe_res;
	}

	dd->mem_phys_addr = resource->start;
	dd->mem_size = resource_size(resource);
	dd->dev = &pdev->dev;

	if (pdata) {
		master->rt = pdata->rt_priority;
		if (pdata->dma_config) {
			rc = pdata->dma_config();
			if (rc) {
				dev_warn(&pdev->dev,
					 "%s: DM mode not supported\n",
					 __func__);
				dd->use_dma = 0;
				goto skip_dma_resources;
			}
		}
		if (!dd->pdata->use_bam)
			goto skip_dma_resources;

		rc = msm_spi_bam_get_resources(dd, pdev, master);
		if (rc) {
			dev_warn(dd->dev,
				 "%s: Failed to get BAM resources",
				 __func__);
			goto skip_dma_resources;
		}
		dd->use_dma = 1;
	}

	spi_dma_mask(&pdev->dev);
skip_dma_resources:

	spin_lock_init(&dd->queue_lock);
	mutex_init(&dd->core_lock);
	init_waitqueue_head(&dd->continue_suspend);

	if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
				     dd->mem_size, SPI_DRV_NAME)) {
		rc = -ENXIO;
		goto err_probe_reqmem;
	}

	dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
	if (!dd->base) {
		rc = -ENOMEM;
		goto err_probe_reqmem;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	dd->suspended = 1;
	rc = spi_register_master(master);
	if (rc)
		goto err_probe_reg_master;

	rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
	if (rc) {
		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
		goto err_attrs;
	}
	spi_debugfs_init(dd);

	return 0;

err_attrs:
	spi_unregister_master(master);
err_probe_reg_master:
	pm_runtime_disable(&pdev->dev);
err_probe_reqmem:
err_probe_res:
	spi_master_put(master);
err_probe_exit:
	return rc;
}
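
/*
 * msm_spi_pm_suspend_runtime: mark the controller suspended, wait for any
 * in-flight transfer to drain, then (for exclusively owned controllers)
 * disconnect the BAM pipes, release clocks/GPIOs and drop the
 * bus-bandwidth vote.
 */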
static int msm_spi_pm_suspend_runtime(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;
	unsigned long flags;

	dev_dbg(device, "pm_runtime: suspending...\n");
	if (!master)
		goto suspend_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto suspend_exit;
	if (dd->suspended)
		return 0;

	/*
	 * Make sure nothing is added to the queue while we're
	 * suspending
	 */
	spin_lock_irqsave(&dd->queue_lock, flags);
	dd->suspended = 1;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	/* Wait for transactions to end, or until interrupted by a signal */
	wait_event_interruptible(dd->continue_suspend,
				 !dd->transfer_pending);

	mutex_lock(&dd->core_lock);
	if (dd->pdata && !dd->pdata->is_shared && dd->use_dma) {
		msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
		msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
	}
	if (dd->pdata && !dd->pdata->is_shared)
		put_local_resources(dd);

	if (dd->pdata)
		msm_spi_clk_path_vote(dd, 0);
	mutex_unlock(&dd->core_lock);

suspend_exit:
	return 0;
}
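
/*
 * msm_spi_pm_resume_runtime: counterpart of the runtime suspend above;
 * performs the one-time init_resources() on first resume, restores the
 * bandwidth vote and, for exclusively owned controllers, re-acquires
 * clocks/GPIOs and reconnects the BAM pipes.
 */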
static int msm_spi_pm_resume_runtime(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;
	int ret = 0;

	dev_dbg(device, "pm_runtime: resuming...\n");
	if (!master)
		goto resume_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto resume_exit;

	if (!dd->suspended)
		return 0;
	if (!dd->is_init_complete) {
		ret = init_resources(pdev);
		if (ret != 0)
			return ret;

		dd->is_init_complete = true;
	}
	msm_spi_clk_path_init(dd);
	msm_spi_clk_path_vote(dd, dd->pdata->max_clock_speed);

	if (!dd->pdata->is_shared) {
		ret = get_local_resources(dd);
		if (ret)
			return ret;
	}
	if (!dd->pdata->is_shared && dd->use_dma) {
		msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
					 &dd->bam.prod.config);
		msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
					 &dd->bam.cons.config);
	}
	dd->suspended = 0;

resume_exit:
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int msm_spi_suspend(struct device *device)
{
	if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
		struct platform_device *pdev = to_platform_device(device);
		struct spi_master *master = platform_get_drvdata(pdev);
		struct msm_spi *dd;

		dev_dbg(device, "system suspend");
		if (!master)
			goto suspend_exit;
		dd = spi_master_get_devdata(master);
		if (!dd)
			goto suspend_exit;
		msm_spi_pm_suspend_runtime(device);

		/* Set the device's runtime PM status to 'suspended'. */
		pm_runtime_disable(device);
		pm_runtime_set_suspended(device);
		pm_runtime_enable(device);
	}
suspend_exit:
	return 0;
}

static int msm_spi_resume(struct device *device)
{
	/*
	 * Rely on runtime-PM to call resume in case it is enabled.
	 * Even if it's not enabled, rely on the 1st client transaction to do
	 * clock ON and gpio configuration.
	 */
	dev_dbg(device, "system resume");
	return 0;
}
#else
#define msm_spi_suspend NULL
#define msm_spi_resume NULL
#endif
static int msm_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	spi_debugfs_exit(dd);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);

	if (dd->dma_teardown)
		dd->dma_teardown(dd);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	clk_put(dd->clk);
	clk_put(dd->pclk);
	msm_spi_clk_path_teardown(dd);
	platform_set_drvdata(pdev, NULL);
	spi_unregister_master(master);
	spi_master_put(master);

	return 0;
}

static const struct of_device_id msm_spi_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v2" },
	{}
};

static const struct dev_pm_ops msm_spi_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
	SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
			   msm_spi_pm_resume_runtime, NULL)
};

static struct platform_driver msm_spi_driver = {
	.driver = {
		.name = SPI_DRV_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_spi_dev_pm_ops,
		.of_match_table = msm_spi_dt_match,
	},
	.probe = msm_spi_probe,
	.remove = msm_spi_remove,
};

module_platform_driver(msm_spi_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:"SPI_DRV_NAME);