slim-msm-ctrl.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641
  1. /* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/irq.h>
  13. #include <linux/kernel.h>
  14. #include <linux/init.h>
  15. #include <linux/slab.h>
  16. #include <linux/io.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/slimbus/slimbus.h>
  20. #include <linux/delay.h>
  21. #include <linux/kthread.h>
  22. #include <linux/clk.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/of.h>
  25. #include <linux/of_slimbus.h>
  26. #include <linux/msm-sps.h>
  27. #include <linux/qdsp6v2/apr.h>
  28. #include "slim-msm.h"
  29. #define MSM_SLIM_NAME "msm_slim_ctrl"
  30. #define SLIM_ROOT_FREQ 24576000
  31. #define QC_MSM_DEVS 5
/* Manager-block register offsets, relative to dev->base */
enum mgr_reg {
	MGR_CFG = 0x200,		/* manager configuration */
	MGR_STATUS = 0x204,		/* manager status */
	MGR_RX_MSGQ_CFG = 0x208,	/* RX message-queue configuration */
	MGR_INT_EN = 0x210,		/* interrupt enable */
	MGR_INT_STAT = 0x214,		/* interrupt status (polled in ISR) */
	MGR_INT_CLR = 0x218,		/* interrupt clear (ISR writes handled bits here) */
	MGR_TX_MSG = 0x230,		/* TX message window */
	MGR_RX_MSG = 0x270,		/* RX message window (read word-by-word in ISR) */
	MGR_IE_STAT = 0x2F0,		/* information-element status */
	MGR_VE_STAT = 0x300,		/* value-element status */
};
/* Bit fields for the MGR_CFG register */
enum msg_cfg {
	MGR_CFG_ENABLE = 1,		/* enable the manager block */
	MGR_CFG_RX_MSGQ_EN = 1 << 1,	/* enable RX message queue */
	MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,	/* enable high-priority TX queue */
	MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,	/* enable low-priority TX queue */
};
/* Message queue types (indices for the three manager message queues) */
enum msm_slim_msgq_type {
	MSGQ_RX = 0,
	MSGQ_TX_LOW = 1,
	MSGQ_TX_HIGH = 2,
};
/* Framer-block register offsets, relative to dev->base */
enum frm_reg {
	FRM_CFG = 0x400,		/* framer configuration */
	FRM_STAT = 0x404,		/* framer status */
	FRM_INT_EN = 0x410,		/* framer interrupt enable */
	FRM_INT_STAT = 0x414,		/* framer interrupt status */
	FRM_INT_CLR = 0x418,		/* framer interrupt clear */
	FRM_WAKEUP = 0x41C,		/* written to wake the framer from clock pause */
	FRM_CLKCTL_DONE = 0x420,	/* clock-control done status */
	FRM_IE_STAT = 0x430,		/* information-element status */
	FRM_VE_STAT = 0x440,		/* value-element status */
};
/* Interface-block register offsets, relative to dev->base */
enum intf_reg {
	INTF_CFG = 0x600,		/* interface configuration */
	INTF_STAT = 0x604,		/* interface status */
	INTF_INT_EN = 0x610,		/* interface interrupt enable */
	INTF_INT_STAT = 0x614,		/* interface interrupt status */
	INTF_INT_CLR = 0x618,		/* interface interrupt clear */
	INTF_IE_STAT = 0x630,		/* information-element status */
	INTF_VE_STAT = 0x640,		/* value-element status */
};
/* Bit masks for MGR_INT_STAT / MGR_INT_CLR / MGR_INT_EN */
enum mgr_intr {
	MGR_INT_RECFG_DONE = 1 << 24,	/* reconfiguration sequence completed */
	MGR_INT_TX_NACKED_2 = 1 << 25,	/* TX message was NACKed */
	MGR_INT_MSG_BUF_CONTE = 1 << 26,
	MGR_INT_RX_MSG_RCVD = 1 << 30,	/* RX message available in MGR_RX_MSG */
	/*
	 * NOTE(review): 1 << 31 left-shifts into the sign bit of int, which
	 * is undefined in standard C (kernel builds rely on GCC's defined
	 * behavior here). The resulting bit pattern matches the hardware bit.
	 */
	MGR_INT_TX_MSG_SENT = 1 << 31,	/* TX message transmitted */
};
/*
 * Field bit positions for the framer configuration register.
 * Not referenced in this portion of the file — presumably shifts used when
 * programming FRM_CFG elsewhere; confirm against the programming site.
 */
enum frm_cfg {
	FRM_ACTIVE = 1,
	CLK_GEAR = 7,
	ROOT_FREQ = 11,
	REF_CLK_GEAR = 15,
	INTR_WAKE = 19,
};
  93. static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
  94. static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
  95. {
  96. struct msm_slim_ctrl *dev = sat->dev;
  97. unsigned long flags;
  98. spin_lock_irqsave(&sat->lock, flags);
  99. if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
  100. spin_unlock_irqrestore(&sat->lock, flags);
  101. dev_err(dev->dev, "SAT QUEUE full!");
  102. return -EXFULL;
  103. }
  104. memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
  105. sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
  106. spin_unlock_irqrestore(&sat->lock, flags);
  107. return 0;
  108. }
  109. static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
  110. {
  111. unsigned long flags;
  112. spin_lock_irqsave(&sat->lock, flags);
  113. if (sat->stail == sat->shead) {
  114. spin_unlock_irqrestore(&sat->lock, flags);
  115. return -ENODATA;
  116. }
  117. memcpy(buf, sat->sat_msgs[sat->shead], 40);
  118. sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
  119. spin_unlock_irqrestore(&sat->lock, flags);
  120. return 0;
  121. }
  122. static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
  123. {
  124. e_addr[0] = (buffer[1] >> 24) & 0xff;
  125. e_addr[1] = (buffer[1] >> 16) & 0xff;
  126. e_addr[2] = (buffer[1] >> 8) & 0xff;
  127. e_addr[3] = buffer[1] & 0xff;
  128. e_addr[4] = (buffer[0] >> 24) & 0xff;
  129. e_addr[5] = (buffer[0] >> 16) & 0xff;
  130. }
  131. static bool msm_is_sat_dev(u8 *e_addr)
  132. {
  133. if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
  134. e_addr[2] != QC_CHIPID_SL &&
  135. (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
  136. return true;
  137. return false;
  138. }
  139. static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
  140. {
  141. struct msm_slim_sat *sat = NULL;
  142. int i = 0;
  143. while (!sat && i < dev->nsats) {
  144. if (laddr == dev->satd[i]->satcl.laddr)
  145. sat = dev->satd[i];
  146. i++;
  147. }
  148. return sat;
  149. }
/*
 * msm_slim_interrupt() - top-level ISR for the SLIMbus manager.
 * @irq: interrupt number (unused)
 * @d: controller context (struct msm_slim_ctrl *)
 *
 * Handles, in order: TX completion / TX NACK, RX message arrival (dispatched
 * by message type/code), reconfiguration-done, and finally delegates any
 * pending port interrupts to msm_slim_port_irq_handler().
 *
 * Each handled condition writes its bit to MGR_INT_CLR followed by mb() so
 * the clear reaches the hardware before completions are signalled or work
 * is queued — do not reorder these sequences.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);

	/* TX path: either the message went out, or it was NACKed */
	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK: snapshot all status registers for the dump */
			u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
			u32 mgr_ie_stat = readl_relaxed(dev->base +
						MGR_IE_STAT);
			u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
			u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
			u32 frm_intr_stat = readl_relaxed(dev->base +
						FRM_INT_STAT);
			u32 frm_ie_stat = readl_relaxed(dev->base +
						FRM_IE_STAT);
			u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
			u32 intf_intr_stat = readl_relaxed(dev->base +
						INTF_INT_STAT);
			u32 intf_ie_stat = readl_relaxed(dev->base +
						INTF_IE_STAT);
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
					stat, mgr_stat);
			pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
			pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
					frm_intr_stat, frm_stat);
			pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
					frm_cfg, frm_ie_stat);
			pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
					intf_intr_stat, intf_stat);
			pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);
			/* waiter in msm_xfer_msg() picks this up */
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		msm_slim_manage_tx_msgq(dev, false, NULL);
	}
	/* RX path: read the message out of MGR_RX_MSG and dispatch it */
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* first word carries length (bits 0-4), MT (5-7), MC (8-15) */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* user messages are routed to the satellite's queue */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			/*
			 * NOTE(review): e_addr is extracted here but never
			 * used in this branch — the full message is queued
			 * and decoded again in msm_slim_rxwq().
			 */
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_ABSENT) {
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* replies are matched to waiters in msm_slim_rxwq() */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log the reported information element and move on */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			/* unrecognized message: dump it and clear the irq */
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
						MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* hand any pending port interrupts to the port handler */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0)
		return msm_slim_port_irq_handler(dev, pstat);
	return IRQ_HANDLED;
}
/*
 * msm_xfer_msg() - transmit one SLIMbus message and wait for completion.
 * @ctrl: slimbus controller
 * @txn: transaction descriptor (message type/code, destination, payload)
 *
 * Serialized by dev->tx_lock. Votes runtime PM for messaging bandwidth
 * (and separately for data channels around reconfiguration), assembles the
 * message words in a TX buffer, sends it, and waits up to 1s for the ISR
 * to signal completion. Returns 0 on success, dev->err on a NACK/connect
 * failure, -EBUSY when suspended, -ETIMEDOUT on timeout.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	if (msgv >= 0)
		dev->state = MSM_CTRL_AWAKE;
	mutex_lock(&dev->tx_lock);
	/* refuse transfers while runtime/system suspended (clock-pause
	 * sequence messages are allowed through the IDLE check) */
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_IDLE)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* wait out any reconfiguration already in flight */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	/*
	 * NOTE(review): pbuf is not NULL-checked here, unlike the identical
	 * call in msm_set_laddr() — confirm msm_get_msg_buf() cannot fail
	 * on this path, otherwise the dereference below is a crash.
	 */
	pbuf = msm_get_msg_buf(dev, txn->rl, &done);
	dev->err = 0;
	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		/* enumeration-address destinations are not supported */
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* port connect/disconnect for the manager itself goes to the PGD */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* payload starts after the 3-byte (logical) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* element-access messages carry a 2-byte element code */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 * Only disable port
			 */
			writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
					dev->pipes[*puc].port_b, dev->ver));
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* rewrite port number to the manager-side (BAM) port */
		*(puc) = (u8)dev->pipes[*puc].port_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	msm_send_msg_buf(dev, pbuf, txn->rl, MGR_TX_MSG);
	timeout = wait_for_completion_timeout(&done, HZ);
	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* clock-pause reconfigure: wait for reconf done, then gate
		 * the root clock and irq */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* drop the data-channel PM vote once no slots used */
			if (dev->ctrl.sched.usedslots == 0 &&
				dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);
	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);
	return timeout ? dev->err : -ETIMEDOUT;
}
  455. static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
  456. {
  457. int msec_per_frm = 0;
  458. int sfr_per_sec;
  459. /* Wait for 1 superframe, or default time and then retry */
  460. sfr_per_sec = dev->framer.superfreq /
  461. (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
  462. if (sfr_per_sec)
  463. msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
  464. if (msec_per_frm < DEF_RETRY_MS)
  465. msec_per_frm = DEF_RETRY_MS;
  466. msleep(msec_per_frm);
  467. }
  468. static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
  469. u8 elen, u8 laddr)
  470. {
  471. struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
  472. struct completion done;
  473. int timeout, ret, retries = 0;
  474. u32 *buf;
  475. retry_laddr:
  476. init_completion(&done);
  477. mutex_lock(&dev->tx_lock);
  478. buf = msm_get_msg_buf(dev, 9, &done);
  479. if (buf == NULL)
  480. return -ENOMEM;
  481. buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
  482. SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
  483. SLIM_MSG_DEST_LOGICALADDR,
  484. ea[5] | ea[4] << 8);
  485. buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
  486. buf[2] = laddr;
  487. ret = msm_send_msg_buf(dev, buf, 9, MGR_TX_MSG);
  488. timeout = wait_for_completion_timeout(&done, HZ);
  489. if (!timeout)
  490. dev->err = -ETIMEDOUT;
  491. if (dev->err) {
  492. ret = dev->err;
  493. dev->err = 0;
  494. }
  495. mutex_unlock(&dev->tx_lock);
  496. if (ret) {
  497. pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
  498. if (retries < INIT_MX_RETRIES) {
  499. msm_slim_wait_retry(dev);
  500. retries++;
  501. goto retry_laddr;
  502. } else {
  503. pr_err("set LADDR failed after retrying:ret:%d", ret);
  504. }
  505. }
  506. return ret;
  507. }
/*
 * msm_clk_pause_wakeup() - bring the bus out of clock pause.
 * @ctrl: slimbus controller
 *
 * Re-enables the irq (disabled when clock pause was entered in
 * msm_xfer_msg), restores the root clock, and pokes FRM_WAKEUP.
 * Always returns 0.
 */
static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
{
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	enable_irq(dev->irq);
	clk_prepare_enable(dev->rclk);
	writel_relaxed(1, dev->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before exiting function */
	mb();
	/*
	 * Workaround: Currently, slave is reporting lost-sync messages
	 * after slimbus comes out of clock pause.
	 * Transaction with slave fail before slave reports that message
	 * Give some time for that report to come
	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 20 superframes here to ensure
	 * we get the message
	 */
	usleep_range(4950, 5000);
	return 0;
}
/*
 * msm_sat_define_ch() - handle a satellite channel define/activate/control
 * request.
 * @sat: requesting satellite
 * @buf: raw user message; channel numbers start at buf[5] (CHAN_CTRL) or
 *       buf[8] (DEFINE/DEF_ACT), with rate/protocol fields in buf[3..6]
 * @len: message length in bytes
 * @mc: message code (SLIM_USR_MC_CHAN_CTRL / DEFINE_CHAN / DEF_ACT_CHAN)
 *
 * For CHAN_CTRL, applies the requested operation to the (already known)
 * channel group and records pending remove/define counts to be resolved at
 * RECONFIG_NOW. Otherwise decodes the channel properties, allocates any
 * channels not yet tracked in sat->satch, defines the group, and (for
 * DEF_ACT_CHAN) activates it. Returns 0 or a negative errno.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* first channel in the request must already be tracked */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/* count pending remove/define per listed channel;
			 * resolved later at RECONFIG_NOW */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] != sat->satch[j].chan)
						continue;
					if (oper == SLIM_CH_REMOVE)
						sat->satch[j].req_rem++;
					else
						sat->satch[j].req_def++;
					break;
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u16 *grph = NULL;
		u8 coeff, cc;
		u8 prrate = buf[6];
		if (len <= 8)
			return -EINVAL;
		/* resolve or allocate a handle for each listed channel */
		for (i = 8; i < len; i++) {
			int j = 0;
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				/* already tracked: refresh and count define */
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				/* First channel in group from satellite */
				if (i == 8)
					grph = &sat->satch[j].chanh;
				continue;
			}
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			/* new channel: j == sat->nsatch here, fill next slot */
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			if (i == 8)
				grph = &sat->satch[j].chanh;
			sat->nsatch++;
		}
		/* decode channel properties from the message fields */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 with shift >> 5 yields even values
		 * 0/2/4/6 rather than 0-3 — looks like it should be >> 6;
		 * confirm against the satellite message format spec.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		/* NOTE(review): this first assignment is dead — the if/else
		 * below always overwrites baser */
		prop.baser = SLIM_RATE_4000HZ;
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* i > 9 means more than one channel was listed: group define */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
						true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
						chh, 1, true, &chh[0]);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;
		else if (grph)
			*grph = chh[0];
		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
						chh[0],
						SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
  634. static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
  635. {
  636. u8 buf[40];
  637. u8 mc, mt, len;
  638. int i, ret;
  639. if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
  640. len = buf[0] & 0x1F;
  641. mt = (buf[0] >> 5) & 0x7;
  642. mc = buf[1];
  643. if (mt == SLIM_MSG_MT_CORE &&
  644. mc == SLIM_MSG_MC_REPORT_PRESENT) {
  645. u8 laddr;
  646. u8 e_addr[6];
  647. for (i = 0; i < 6; i++)
  648. e_addr[i] = buf[7-i];
  649. ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr,
  650. false);
  651. /* Is this QTI ported generic device? */
  652. if (!ret && e_addr[5] == QC_MFGID_LSB &&
  653. e_addr[4] == QC_MFGID_MSB &&
  654. e_addr[1] == QC_DEVID_PGD &&
  655. e_addr[2] != QC_CHIPID_SL)
  656. dev->pgdla = laddr;
  657. if (!ret && !pm_runtime_enabled(dev->dev) &&
  658. laddr == (QC_MSM_DEVS - 1))
  659. pm_runtime_enable(dev->dev);
  660. if (!ret && msm_is_sat_dev(e_addr)) {
  661. struct msm_slim_sat *sat = addr_to_sat(dev,
  662. laddr);
  663. if (!sat)
  664. sat = msm_slim_alloc_sat(dev);
  665. if (!sat)
  666. return;
  667. sat->satcl.laddr = laddr;
  668. msm_sat_enqueue(sat, (u32 *)buf, len);
  669. queue_work(sat->wq, &sat->wd);
  670. }
  671. if (ret)
  672. pr_err("assign laddr failed, error:%d", ret);
  673. } else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
  674. mc == SLIM_MSG_MC_REPLY_VALUE) {
  675. u8 tid = buf[3];
  676. dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
  677. slim_msg_response(&dev->ctrl, &buf[4], tid,
  678. len - 4);
  679. pm_runtime_mark_last_busy(dev->dev);
  680. } else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
  681. u8 l_addr = buf[2];
  682. u16 ele = (u16)buf[4] << 4;
  683. ele |= ((buf[3] & 0xf0) >> 4);
  684. dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
  685. l_addr, ele);
  686. for (i = 0; i < len - 5; i++)
  687. dev_err(dev->dev, "offset:0x%x:bit mask:%x",
  688. i, buf[i+5]);
  689. } else {
  690. dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
  691. mc, mt);
  692. for (i = 0; i < len; i++)
  693. dev_err(dev->dev, "error msg: %x", buf[i]);
  694. }
  695. } else
  696. dev_err(dev->dev, "rxwq called and no dequeue");
  697. }
  698. static void slim_sat_rxprocess(struct work_struct *work)
  699. {
  700. struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
  701. struct msm_slim_ctrl *dev = sat->dev;
  702. u8 buf[40];
  703. while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
  704. struct slim_msg_txn txn;
  705. u8 len, mc, mt;
  706. u32 bw_sl;
  707. int ret = 0;
  708. int satv = -1;
  709. bool gen_ack = false;
  710. u8 tid;
  711. u8 wbuf[8];
  712. int i, retries = 0;
  713. txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
  714. txn.dt = SLIM_MSG_DEST_LOGICALADDR;
  715. txn.ec = 0;
  716. txn.rbuf = NULL;
  717. txn.la = sat->satcl.laddr;
  718. /* satellite handling */
  719. len = buf[0] & 0x1F;
  720. mc = buf[1];
  721. mt = (buf[0] >> 5) & 0x7;
  722. if (mt == SLIM_MSG_MT_CORE &&
  723. mc == SLIM_MSG_MC_REPORT_PRESENT) {
  724. u8 e_addr[6];
  725. for (i = 0; i < 6; i++)
  726. e_addr[i] = buf[7-i];
  727. if (pm_runtime_enabled(dev->dev)) {
  728. satv = msm_slim_get_ctrl(dev);
  729. if (satv >= 0)
  730. sat->pending_capability = true;
  731. }
  732. /*
  733. * Since capability message is already sent, present
  734. * message will indicate subsystem hosting this
  735. * satellite has restarted.
  736. * Remove all active channels of this satellite
  737. * when this is detected
  738. */
  739. if (sat->sent_capability) {
  740. for (i = 0; i < sat->nsatch; i++) {
  741. if (sat->satch[i].reconf) {
  742. pr_err("SSR, sat:%d, rm ch:%d",
  743. sat->satcl.laddr,
  744. sat->satch[i].chan);
  745. slim_control_ch(&sat->satcl,
  746. sat->satch[i].chanh,
  747. SLIM_CH_REMOVE, true);
  748. slim_dealloc_ch(&sat->satcl,
  749. sat->satch[i].chanh);
  750. sat->satch[i].reconf = false;
  751. }
  752. }
  753. }
  754. } else if (mt != SLIM_MSG_MT_CORE &&
  755. mc != SLIM_MSG_MC_REPORT_PRESENT) {
  756. satv = msm_slim_get_ctrl(dev);
  757. }
  758. switch (mc) {
  759. case SLIM_MSG_MC_REPORT_PRESENT:
  760. /* Remove runtime_pm vote once satellite acks */
  761. if (mt != SLIM_MSG_MT_CORE) {
  762. if (pm_runtime_enabled(dev->dev) &&
  763. sat->pending_capability) {
  764. msm_slim_put_ctrl(dev);
  765. sat->pending_capability = false;
  766. }
  767. continue;
  768. }
  769. /* send a Manager capability msg */
  770. if (sat->sent_capability) {
  771. if (mt == SLIM_MSG_MT_CORE)
  772. goto send_capability;
  773. else
  774. continue;
  775. }
  776. ret = slim_add_device(&dev->ctrl, &sat->satcl);
  777. if (ret) {
  778. dev_err(dev->dev,
  779. "Satellite-init failed");
  780. continue;
  781. }
  782. /* Satellite-channels */
  783. sat->satch = kzalloc(MSM_MAX_SATCH *
  784. sizeof(struct msm_sat_chan),
  785. GFP_KERNEL);
  786. send_capability:
  787. txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
  788. txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
  789. txn.la = sat->satcl.laddr;
  790. txn.rl = 8;
  791. wbuf[0] = SAT_MAGIC_LSB;
  792. wbuf[1] = SAT_MAGIC_MSB;
  793. wbuf[2] = SAT_MSG_VER;
  794. wbuf[3] = SAT_MSG_PROT;
  795. txn.wbuf = wbuf;
  796. txn.len = 4;
  797. ret = msm_xfer_msg(&dev->ctrl, &txn);
  798. if (ret) {
  799. pr_err("capability for:0x%x fail:%d, retry:%d",
  800. sat->satcl.laddr, ret, retries);
  801. if (retries < INIT_MX_RETRIES) {
  802. msm_slim_wait_retry(dev);
  803. retries++;
  804. goto send_capability;
  805. } else {
  806. pr_err("failed after all retries:%d",
  807. ret);
  808. }
  809. } else {
  810. sat->sent_capability = true;
  811. }
  812. break;
  813. case SLIM_USR_MC_ADDR_QUERY:
  814. memcpy(&wbuf[1], &buf[4], 6);
  815. ret = slim_get_logical_addr(&sat->satcl,
  816. &wbuf[1], 6, &wbuf[7]);
  817. if (ret)
  818. memset(&wbuf[1], 0, 6);
  819. wbuf[0] = buf[3];
  820. txn.mc = SLIM_USR_MC_ADDR_REPLY;
  821. txn.rl = 12;
  822. txn.len = 8;
  823. txn.wbuf = wbuf;
  824. msm_xfer_msg(&dev->ctrl, &txn);
  825. break;
  826. case SLIM_USR_MC_DEFINE_CHAN:
  827. case SLIM_USR_MC_DEF_ACT_CHAN:
  828. case SLIM_USR_MC_CHAN_CTRL:
  829. if (mc != SLIM_USR_MC_CHAN_CTRL)
  830. tid = buf[7];
  831. else
  832. tid = buf[4];
  833. gen_ack = true;
  834. ret = msm_sat_define_ch(sat, buf, len, mc);
  835. if (ret) {
  836. dev_err(dev->dev,
  837. "SAT define_ch returned:%d",
  838. ret);
  839. }
  840. if (!sat->pending_reconf) {
  841. int chv = msm_slim_get_ctrl(dev);
  842. if (chv >= 0)
  843. sat->pending_reconf = true;
  844. }
  845. break;
  846. case SLIM_USR_MC_RECONFIG_NOW:
  847. tid = buf[3];
  848. gen_ack = true;
  849. ret = slim_reconfigure_now(&sat->satcl);
  850. for (i = 0; i < sat->nsatch; i++) {
  851. struct msm_sat_chan *sch = &sat->satch[i];
  852. if (sch->req_rem && sch->reconf) {
  853. if (!ret) {
  854. slim_dealloc_ch(&sat->satcl,
  855. sch->chanh);
  856. sch->reconf = false;
  857. }
  858. sch->req_rem--;
  859. } else if (sch->req_def) {
  860. if (ret)
  861. slim_dealloc_ch(&sat->satcl,
  862. sch->chanh);
  863. else
  864. sch->reconf = true;
  865. sch->req_def--;
  866. }
  867. }
  868. if (sat->pending_reconf) {
  869. msm_slim_put_ctrl(dev);
  870. sat->pending_reconf = false;
  871. }
  872. break;
  873. case SLIM_USR_MC_REQ_BW:
  874. /* what we get is in SLOTS */
  875. bw_sl = (u32)buf[4] << 3 |
  876. ((buf[3] & 0xE0) >> 5);
  877. sat->satcl.pending_msgsl = bw_sl;
  878. tid = buf[5];
  879. gen_ack = true;
  880. break;
  881. case SLIM_USR_MC_CONNECT_SRC:
  882. case SLIM_USR_MC_CONNECT_SINK:
  883. if (mc == SLIM_USR_MC_CONNECT_SRC)
  884. txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
  885. else
  886. txn.mc = SLIM_MSG_MC_CONNECT_SINK;
  887. wbuf[0] = buf[4] & 0x1F;
  888. wbuf[1] = buf[5];
  889. tid = buf[6];
  890. txn.la = buf[3];
  891. txn.mt = SLIM_MSG_MT_CORE;
  892. txn.rl = 6;
  893. txn.len = 2;
  894. txn.wbuf = wbuf;
  895. gen_ack = true;
  896. ret = msm_xfer_msg(&dev->ctrl, &txn);
  897. break;
  898. case SLIM_USR_MC_DISCONNECT_PORT:
  899. txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
  900. wbuf[0] = buf[4] & 0x1F;
  901. tid = buf[5];
  902. txn.la = buf[3];
  903. txn.rl = 5;
  904. txn.len = 1;
  905. txn.mt = SLIM_MSG_MT_CORE;
  906. txn.wbuf = wbuf;
  907. gen_ack = true;
  908. ret = msm_xfer_msg(&dev->ctrl, &txn);
  909. break;
  910. case SLIM_MSG_MC_REPORT_ABSENT:
  911. dev_info(dev->dev, "Received Report Absent Message\n");
  912. break;
  913. default:
  914. break;
  915. }
  916. if (!gen_ack) {
  917. if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
  918. msm_slim_put_ctrl(dev);
  919. continue;
  920. }
  921. wbuf[0] = tid;
  922. if (!ret)
  923. wbuf[1] = MSM_SAT_SUCCSS;
  924. else
  925. wbuf[1] = 0;
  926. txn.mc = SLIM_USR_MC_GENERIC_ACK;
  927. txn.la = sat->satcl.laddr;
  928. txn.rl = 6;
  929. txn.len = 2;
  930. txn.wbuf = wbuf;
  931. txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
  932. msm_xfer_msg(&dev->ctrl, &txn);
  933. if (satv >= 0)
  934. msm_slim_put_ctrl(dev);
  935. }
  936. }
  937. static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
  938. {
  939. struct msm_slim_sat *sat;
  940. char *name;
  941. if (dev->nsats >= MSM_MAX_NSATS)
  942. return NULL;
  943. sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
  944. if (!sat)
  945. return NULL;
  946. name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
  947. if (!name) {
  948. kfree(sat);
  949. return NULL;
  950. }
  951. dev->satd[dev->nsats] = sat;
  952. sat->dev = dev;
  953. snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
  954. sat->satcl.name = name;
  955. spin_lock_init(&sat->lock);
  956. INIT_WORK(&sat->wd, slim_sat_rxprocess);
  957. sat->wq = create_singlethread_workqueue(sat->satcl.name);
  958. if (!sat->wq) {
  959. kfree(name);
  960. kfree(sat);
  961. return NULL;
  962. }
  963. /*
  964. * Both sats will be allocated from RX thread and RX thread will
  965. * process messages sequentially. No synchronization necessary
  966. */
  967. dev->nsats++;
  968. return sat;
  969. }
/*
 * RX message-queue worker thread.
 *
 * Blocks on rx_msgq_notify (one completion per received message), then
 * drains the hardware message queue one 32-bit word at a time via
 * msm_slim_rx_msgq_get().  The first word of each message carries the
 * message length, type (MT) and code (MC); messages with user-referred
 * types are handed to the owning satellite's workqueue, everything else
 * goes through the generic RX path.  Returns 0 when the thread is
 * stopped via kthread_stop().
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;		/* next word slot within the current message */
	u8 msg_len = 0;		/* byte length of the message being assembled */
	int ret;
	dev_dbg(dev->dev, "rx thread started");
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);
		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);
		/* 1 irq notification per message */
		if (dev->use_rx_msgqs != MSM_MSGQ_ENABLED) {
			/* Message queues not active: use register-based RX */
			msm_slim_rxwq(dev);
			continue;
		}
		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}
		pr_debug("message[%d] = 0x%x\n", index, *buffer);
		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/*
			 * First word of a message: bits 4:0 length,
			 * 7:5 MT, 15:8 MC, 23:16 source logical address.
			 */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				/* May be NULL if no satellite owns laddr */
				sat = addr_to_sat(dev, laddr);
			}
		}
		/* Whole message collected once index*4 covers msg_len bytes */
		if ((index * 4) >= msg_len) {
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}
	return 0;
}
  1026. static void msm_slim_prg_slew(struct platform_device *pdev,
  1027. struct msm_slim_ctrl *dev)
  1028. {
  1029. struct resource *slew_io;
  1030. void __iomem *slew_reg;
  1031. /* SLEW RATE register for this slimbus */
  1032. dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  1033. "slimbus_slew_reg");
  1034. if (!dev->slew_mem) {
  1035. dev_dbg(&pdev->dev, "no slimbus slew resource\n");
  1036. return;
  1037. }
  1038. slew_io = request_mem_region(dev->slew_mem->start,
  1039. resource_size(dev->slew_mem), pdev->name);
  1040. if (!slew_io) {
  1041. dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
  1042. dev->slew_mem = NULL;
  1043. return;
  1044. }
  1045. slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
  1046. if (!slew_reg) {
  1047. dev_dbg(dev->dev, "slew register mapping failed");
  1048. release_mem_region(dev->slew_mem->start,
  1049. resource_size(dev->slew_mem));
  1050. dev->slew_mem = NULL;
  1051. return;
  1052. }
  1053. writel_relaxed(1, slew_reg);
  1054. /* Make sure slimbus-slew rate enabling goes through */
  1055. wmb();
  1056. iounmap(slew_reg);
  1057. }
  1058. static int msm_slim_probe(struct platform_device *pdev)
  1059. {
  1060. struct msm_slim_ctrl *dev;
  1061. int ret;
  1062. enum apr_subsys_state q6_state;
  1063. struct resource *bam_mem, *bam_io;
  1064. struct resource *slim_mem, *slim_io;
  1065. struct resource *irq, *bam_irq;
  1066. bool rxreg_access = false;
  1067. q6_state = apr_get_q6_state();
  1068. if (q6_state == APR_SUBSYS_DOWN) {
  1069. dev_dbg(&pdev->dev, "defering %s, adsp_state %d\n", __func__,
  1070. q6_state);
  1071. return -EPROBE_DEFER;
  1072. }
  1073. dev_dbg(&pdev->dev, "adsp is ready\n");
  1074. slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  1075. "slimbus_physical");
  1076. if (!slim_mem) {
  1077. dev_err(&pdev->dev, "no slimbus physical memory resource\n");
  1078. return -ENODEV;
  1079. }
  1080. slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
  1081. pdev->name);
  1082. if (!slim_io) {
  1083. dev_err(&pdev->dev, "slimbus memory already claimed\n");
  1084. return -EBUSY;
  1085. }
  1086. bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  1087. "slimbus_bam_physical");
  1088. if (!bam_mem) {
  1089. dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
  1090. ret = -ENODEV;
  1091. goto err_get_res_bam_failed;
  1092. }
  1093. bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
  1094. pdev->name);
  1095. if (!bam_io) {
  1096. release_mem_region(slim_mem->start, resource_size(slim_mem));
  1097. dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
  1098. ret = -EBUSY;
  1099. goto err_get_res_bam_failed;
  1100. }
  1101. irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
  1102. "slimbus_irq");
  1103. if (!irq) {
  1104. dev_err(&pdev->dev, "no slimbus IRQ resource\n");
  1105. ret = -ENODEV;
  1106. goto err_get_res_failed;
  1107. }
  1108. bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
  1109. "slimbus_bam_irq");
  1110. if (!bam_irq) {
  1111. dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
  1112. ret = -ENODEV;
  1113. goto err_get_res_failed;
  1114. }
  1115. dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
  1116. if (!dev) {
  1117. ret = -ENOMEM;
  1118. goto err_get_res_failed;
  1119. }
  1120. dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
  1121. GFP_KERNEL);
  1122. if (!dev->wr_comp)
  1123. return -ENOMEM;
  1124. dev->dev = &pdev->dev;
  1125. platform_set_drvdata(pdev, dev);
  1126. slim_set_ctrldata(&dev->ctrl, dev);
  1127. dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
  1128. if (!dev->base) {
  1129. dev_err(&pdev->dev, "IOremap failed\n");
  1130. ret = -ENOMEM;
  1131. goto err_ioremap_failed;
  1132. }
  1133. dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
  1134. if (!dev->bam.base) {
  1135. dev_err(&pdev->dev, "BAM IOremap failed\n");
  1136. ret = -ENOMEM;
  1137. goto err_ioremap_bam_failed;
  1138. }
  1139. if (pdev->dev.of_node) {
  1140. ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
  1141. &dev->ctrl.nr);
  1142. if (ret) {
  1143. dev_err(&pdev->dev, "Cell index not specified:%d", ret);
  1144. goto err_of_init_failed;
  1145. }
  1146. rxreg_access = of_property_read_bool(pdev->dev.of_node,
  1147. "qcom,rxreg-access");
  1148. /* Optional properties */
  1149. ret = of_property_read_u32(pdev->dev.of_node,
  1150. "qcom,min-clk-gear", &dev->ctrl.min_cg);
  1151. ret = of_property_read_u32(pdev->dev.of_node,
  1152. "qcom,max-clk-gear", &dev->ctrl.max_cg);
  1153. pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
  1154. dev->ctrl.max_cg, rxreg_access);
  1155. } else {
  1156. dev->ctrl.nr = pdev->id;
  1157. }
  1158. dev->ctrl.nchans = MSM_SLIM_NCHANS;
  1159. dev->ctrl.nports = MSM_SLIM_NPORTS;
  1160. dev->ctrl.set_laddr = msm_set_laddr;
  1161. dev->ctrl.xfer_msg = msm_xfer_msg;
  1162. dev->ctrl.wakeup = msm_clk_pause_wakeup;
  1163. dev->ctrl.alloc_port = msm_alloc_port;
  1164. dev->ctrl.dealloc_port = msm_dealloc_port;
  1165. dev->ctrl.port_xfer = msm_slim_port_xfer;
  1166. dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
  1167. /* Reserve some messaging BW for satellite-apps driver communication */
  1168. dev->ctrl.sched.pending_msgsl = 30;
  1169. init_completion(&dev->reconf);
  1170. mutex_init(&dev->tx_lock);
  1171. spin_lock_init(&dev->rx_lock);
  1172. dev->ee = 1;
  1173. if (rxreg_access)
  1174. dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
  1175. else
  1176. dev->use_rx_msgqs = MSM_MSGQ_RESET;
  1177. dev->irq = irq->start;
  1178. dev->bam.irq = bam_irq->start;
  1179. dev->hclk = clk_get(dev->dev, "iface_clk");
  1180. if (IS_ERR(dev->hclk))
  1181. dev->hclk = NULL;
  1182. else
  1183. clk_prepare_enable(dev->hclk);
  1184. ret = msm_slim_sps_init(dev, bam_mem, MGR_STATUS, false);
  1185. if (ret != 0) {
  1186. dev_err(dev->dev, "error SPS init\n");
  1187. goto err_sps_init_failed;
  1188. }
  1189. /* Fire up the Rx message queue thread */
  1190. dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
  1191. MSM_SLIM_NAME "_rx_msgq_thread");
  1192. if (IS_ERR(dev->rx_msgq_thread)) {
  1193. ret = PTR_ERR(dev->rx_msgq_thread);
  1194. dev_err(dev->dev, "Failed to start Rx message queue thread\n");
  1195. goto err_thread_create_failed;
  1196. }
  1197. dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
  1198. dev->framer.superfreq =
  1199. dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
  1200. dev->ctrl.a_framer = &dev->framer;
  1201. dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
  1202. dev->ctrl.dev.parent = &pdev->dev;
  1203. dev->ctrl.dev.of_node = pdev->dev.of_node;
  1204. ret = request_threaded_irq(dev->irq, NULL, msm_slim_interrupt,
  1205. IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  1206. "msm_slim_irq", dev);
  1207. if (ret) {
  1208. dev_err(&pdev->dev, "request IRQ failed\n");
  1209. goto err_request_irq_failed;
  1210. }
  1211. msm_slim_prg_slew(pdev, dev);
  1212. /* Register with framework before enabling frame, clock */
  1213. ret = slim_add_numbered_controller(&dev->ctrl);
  1214. if (ret) {
  1215. dev_err(dev->dev, "error adding controller\n");
  1216. goto err_ctrl_failed;
  1217. }
  1218. dev->rclk = clk_get(dev->dev, "core_clk");
  1219. if (!dev->rclk) {
  1220. dev_err(dev->dev, "slimbus clock not found");
  1221. goto err_clk_get_failed;
  1222. }
  1223. clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
  1224. clk_prepare_enable(dev->rclk);
  1225. dev->ver = readl_relaxed(dev->base);
  1226. /* Version info in 16 MSbits */
  1227. dev->ver >>= 16;
  1228. /* Component register initialization */
  1229. writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
  1230. writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
  1231. dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
  1232. /*
  1233. * Manager register initialization
  1234. * If RX msg Q is used, disable RX_MSG_RCVD interrupt
  1235. */
  1236. if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
  1237. writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
  1238. MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
  1239. MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
  1240. else
  1241. writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
  1242. MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
  1243. MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
  1244. writel_relaxed(1, dev->base + MGR_CFG);
  1245. /*
  1246. * Framer registers are beyond 1K memory region after Manager and/or
  1247. * component registers. Make sure those writes are ordered
  1248. * before framer register writes
  1249. */
  1250. wmb();
  1251. /* Framer register initialization */
  1252. writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
  1253. (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
  1254. dev->base + FRM_CFG);
  1255. /*
  1256. * Make sure that framer wake-up and enabling writes go through
  1257. * before any other component is enabled. Framer is responsible for
  1258. * clocking the bus and enabling framer first will ensure that other
  1259. * devices can report presence when they are enabled
  1260. */
  1261. mb();
  1262. /* Enable RX msg Q */
  1263. if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
  1264. writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
  1265. dev->base + MGR_CFG);
  1266. else
  1267. writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
  1268. /*
  1269. * Make sure that manager-enable is written through before interface
  1270. * device is enabled
  1271. */
  1272. mb();
  1273. writel_relaxed(1, dev->base + INTF_CFG);
  1274. /*
  1275. * Make sure that interface-enable is written through before enabling
  1276. * ported generic device inside MSM manager
  1277. */
  1278. mb();
  1279. writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
  1280. writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
  1281. (4 * dev->ee));
  1282. /*
  1283. * Make sure that ported generic device is enabled and port-EE settings
  1284. * are written through before finally enabling the component
  1285. */
  1286. mb();
  1287. writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
  1288. /*
  1289. * Make sure that all writes have gone through before exiting this
  1290. * function
  1291. */
  1292. mb();
  1293. /* Add devices registered with board-info now that controller is up */
  1294. slim_ctrl_add_boarddevs(&dev->ctrl);
  1295. if (pdev->dev.of_node)
  1296. of_register_slim_devices(&dev->ctrl);
  1297. pm_runtime_use_autosuspend(&pdev->dev);
  1298. pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
  1299. pm_runtime_set_active(&pdev->dev);
  1300. dev_dbg(dev->dev, "MSM SB controller is up!\n");
  1301. return 0;
  1302. err_ctrl_failed:
  1303. writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
  1304. err_clk_get_failed:
  1305. kfree(dev->satd);
  1306. err_request_irq_failed:
  1307. kthread_stop(dev->rx_msgq_thread);
  1308. err_thread_create_failed:
  1309. msm_slim_sps_exit(dev, true);
  1310. msm_slim_deinit_ep(dev, &dev->rx_msgq,
  1311. &dev->use_rx_msgqs);
  1312. msm_slim_deinit_ep(dev, &dev->tx_msgq,
  1313. &dev->use_tx_msgqs);
  1314. err_sps_init_failed:
  1315. if (dev->hclk) {
  1316. clk_disable_unprepare(dev->hclk);
  1317. clk_put(dev->hclk);
  1318. }
  1319. err_of_init_failed:
  1320. iounmap(dev->bam.base);
  1321. err_ioremap_bam_failed:
  1322. iounmap(dev->base);
  1323. err_ioremap_failed:
  1324. kfree(dev->wr_comp);
  1325. kfree(dev);
  1326. err_get_res_failed:
  1327. release_mem_region(bam_mem->start, resource_size(bam_mem));
  1328. err_get_res_bam_failed:
  1329. release_mem_region(slim_mem->start, resource_size(slim_mem));
  1330. return ret;
  1331. }
/*
 * Tear down the controller, reversing probe: satellites first (their
 * channels, slimbus devices and workqueues), then runtime PM state,
 * IRQ, controller registration, clocks, SPS/BAM endpoints, the RX
 * thread, MMIO mappings, and finally the claimed memory regions.
 */
static int msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	struct resource *slew_mem = dev->slew_mem;
	int i;
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;
		/* Release every channel the satellite still holds */
		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	clk_put(dev->rclk);
	if (dev->hclk)
		clk_put(dev->hclk);
	msm_slim_sps_exit(dev, true);
	msm_slim_deinit_ep(dev, &dev->rx_msgq,
				&dev->use_rx_msgqs);
	msm_slim_deinit_ep(dev, &dev->tx_msgq,
				&dev->use_tx_msgqs);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev->wr_comp);
	kfree(dev);
	/* Regions were claimed in probe; look them up again to release */
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
  1379. #ifdef CONFIG_PM
  1380. static int msm_slim_runtime_idle(struct device *device)
  1381. {
  1382. struct platform_device *pdev = to_platform_device(device);
  1383. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  1384. if (dev->state == MSM_CTRL_AWAKE)
  1385. dev->state = MSM_CTRL_IDLE;
  1386. dev_dbg(device, "pm_runtime: idle...\n");
  1387. pm_request_autosuspend(device);
  1388. return -EAGAIN;
  1389. }
  1390. #endif
  1391. /*
  1392. * If PM_RUNTIME is not defined, these 2 functions become helper
  1393. * functions to be called from system suspend/resume. So they are not
  1394. * inside ifdef CONFIG_PM_RUNTIME
  1395. */
  1396. #ifdef CONFIG_PM
  1397. static int msm_slim_runtime_suspend(struct device *device)
  1398. {
  1399. struct platform_device *pdev = to_platform_device(device);
  1400. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  1401. int ret;
  1402. dev_dbg(device, "pm_runtime: suspending...\n");
  1403. ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
  1404. if (ret) {
  1405. dev_err(device, "clk pause not entered:%d", ret);
  1406. dev->state = MSM_CTRL_AWAKE;
  1407. } else {
  1408. dev->state = MSM_CTRL_ASLEEP;
  1409. }
  1410. return ret;
  1411. }
  1412. static int msm_slim_runtime_resume(struct device *device)
  1413. {
  1414. struct platform_device *pdev = to_platform_device(device);
  1415. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  1416. int ret = 0;
  1417. dev_dbg(device, "pm_runtime: resuming...\n");
  1418. if (dev->state == MSM_CTRL_ASLEEP)
  1419. ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
  1420. if (ret) {
  1421. dev_err(device, "clk pause not exited:%d", ret);
  1422. dev->state = MSM_CTRL_ASLEEP;
  1423. } else {
  1424. dev->state = MSM_CTRL_AWAKE;
  1425. }
  1426. return ret;
  1427. }
  1428. #endif
  1429. #ifdef CONFIG_PM_SLEEP
  1430. static int msm_slim_suspend(struct device *dev)
  1431. {
  1432. int ret = -EBUSY;
  1433. struct platform_device *pdev = to_platform_device(dev);
  1434. struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
  1435. if (!pm_runtime_enabled(dev) ||
  1436. (!pm_runtime_suspended(dev) &&
  1437. cdev->state == MSM_CTRL_IDLE)) {
  1438. dev_dbg(dev, "system suspend");
  1439. ret = msm_slim_runtime_suspend(dev);
  1440. if (!ret) {
  1441. if (cdev->hclk)
  1442. clk_disable_unprepare(cdev->hclk);
  1443. }
  1444. }
  1445. if (ret == -EBUSY) {
  1446. /*
  1447. * If the clock pause failed due to active channels, there is
  1448. * a possibility that some audio stream is active during suspend
  1449. * We dont want to return suspend failure in that case so that
  1450. * display and relevant components can still go to suspend.
  1451. * If there is some other error, then it should be passed-on
  1452. * to system level suspend
  1453. */
  1454. ret = 0;
  1455. }
  1456. return ret;
  1457. }
  1458. static int msm_slim_resume(struct device *dev)
  1459. {
  1460. /* If runtime_pm is enabled, this resume shouldn't do anything */
  1461. if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
  1462. struct platform_device *pdev = to_platform_device(dev);
  1463. struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
  1464. int ret;
  1465. dev_dbg(dev, "system resume");
  1466. if (cdev->hclk)
  1467. clk_prepare_enable(cdev->hclk);
  1468. ret = msm_slim_runtime_resume(dev);
  1469. if (!ret) {
  1470. pm_runtime_mark_last_busy(dev);
  1471. pm_request_autosuspend(dev);
  1472. }
  1473. return ret;
  1474. }
  1475. return 0;
  1476. }
  1477. #endif /* CONFIG_PM_SLEEP */
/* PM callbacks: system sleep plus runtime PM (handlers defined above) */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
/* Device-tree match table: binds this driver to "qcom,slim-msm" nodes */
static const struct of_device_id msm_slim_dt_match[] = {
	{
		.compatible = "qcom,slim-msm",
	},
	{}
};
/* Platform driver glue: probe/remove, PM ops and DT matching */
static struct platform_driver msm_slim_driver = {
	.probe = msm_slim_probe,
	.remove = msm_slim_remove,
	.driver = {
		.name = MSM_SLIM_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_slim_dev_pm_ops,
		.of_match_table = msm_slim_dt_match,
	},
};
/*
 * Register the platform driver at subsys_initcall time so the bus is
 * available before dependent device drivers probe.
 */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
/* Module unload: unregister the platform driver */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
  1515. MODULE_LICENSE("GPL v2");
  1516. MODULE_DESCRIPTION("MSM Slimbus controller");
  1517. MODULE_ALIAS("platform:msm-slim");