qcom-rpmh-mailbox.c
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ipc_logging.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h> /* For dev_err */
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include <asm/arch_timer.h>
#include <asm-generic/io.h>

#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,tcs-mbox.h>

#include "mailbox.h"

#define CREATE_TRACE_POINTS
#include <trace/events/rpmh.h>

#define RSC_DRV_IPC_LOG_SIZE		2

#define MAX_CMDS_PER_TCS		16
#define MAX_TCS_PER_TYPE		3
#define MAX_TCS_SLOTS			(MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)

#define RSC_DRV_TCS_OFFSET		672
#define RSC_DRV_CMD_OFFSET		20

/* DRV Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG		0x0C
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Register offsets */
#define RSC_DRV_IRQ_ENABLE		0x00
#define RSC_DRV_IRQ_STATUS		0x04
#define RSC_DRV_IRQ_CLEAR		0x08
#define RSC_DRV_CMD_WAIT_FOR_CMPL	0x10
#define RSC_DRV_CONTROL			0x14
#define RSC_DRV_STATUS			0x18
#define RSC_DRV_CMD_ENABLE		0x1C
#define RSC_DRV_CMD_MSGID		0x30
#define RSC_DRV_CMD_ADDR		0x34
#define RSC_DRV_CMD_DATA		0x38
#define RSC_DRV_CMD_STATUS		0x3C
#define RSC_DRV_CMD_RESP_DATA		0x40

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

/* Control/Hidden TCS */
#define TCS_HIDDEN_MAX_SLOTS		2
#define TCS_HIDDEN_CMD0_DRV_DATA	0x38
#define TCS_HIDDEN_CMD_SHIFT		0x08

#define TCS_TYPE_NR			4
#define MAX_POOL_SIZE			(MAX_TCS_PER_TYPE * TCS_TYPE_NR)
#define TCS_M_INIT			0xFFFF

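/*
 * Terminology, as inferred from this file: each RSC exposes a DRV, which
 * owns a set of Trigger Command Sets (TCS). A TCS is a group of command
 * registers; 'm' indexes a TCS within the DRV and 'n' indexes a command
 * slot within that TCS. ACTIVE TCSes are triggered immediately in AMC
 * mode, while SLEEP/WAKE TCSes are staged here and triggered by the
 * remote processor across power-state transitions.
 */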
struct rsc_drv;

struct tcs_response {
	struct rsc_drv *drv;
	struct mbox_chan *chan;
	struct tcs_mbox_msg *msg;
	u32 m; /* m-th TCS */
	int err;
	int idx;
	bool in_use;
	struct list_head list;
};

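/*
 * Note: despite the name, a set bit in @avail marks a response object as
 * allocated (see setup_response() and free_response()); the zero bits are
 * the free entries.
 */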
struct tcs_response_pool {
	struct tcs_response resp[MAX_POOL_SIZE];
	spinlock_t lock;
	DECLARE_BITMAP(avail, MAX_POOL_SIZE);
};

/* One per TCS type of a controller */
struct tcs_mbox {
	struct rsc_drv *drv;
	u32 *cmd_addr;
	int type;
	u32 tcs_mask;
	u32 tcs_offset;
	int num_tcs;
	int ncpt; /* num cmds per tcs */
	DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
	spinlock_t tcs_lock; /* TCS type lock */
};

/* One per MBOX controller */
struct rsc_drv {
	struct mbox_controller mbox;
	const char *name;
	unsigned long addr;
	void __iomem *base; /* start address of the RSC's registers */
	void __iomem *reg_base; /* start address for DRV specific register */
	int drv_id;
	struct platform_device *pdev;
	struct tcs_mbox tcs[TCS_TYPE_NR];
	int num_assigned;
	int num_tcs;
	struct tasklet_struct tasklet;
	struct list_head response_pending;
	spinlock_t drv_lock;
	struct tcs_response_pool *resp_pool;
	atomic_t tcs_in_use[MAX_POOL_SIZE];
	/* Debug info */
	u64 tcs_last_sent_ts[MAX_POOL_SIZE];
	u64 tcs_last_recv_ts[MAX_POOL_SIZE];
	atomic_t tcs_send_count[MAX_POOL_SIZE];
	atomic_t tcs_irq_count[MAX_POOL_SIZE];
	void *ipc_log_ctx;
};

/* Log to IPC and Ftrace */
#define log_send_msg(drv, m, n, i, a, d, c, t) do { \
	trace_rpmh_send_msg(drv->name, drv->addr, m, n, i, a, d, c, t); \
	ipc_log_string(drv->ipc_log_ctx, \
		"send msg: m=%d n=%d msgid=0x%x addr=0x%x data=0x%x cmpl=%d trigger=%d", \
		m, n, i, a, d, c, t); \
} while (0)

#define log_rpmh_notify_irq(drv, m, a, e) do { \
	trace_rpmh_notify_irq(drv->name, m, a, e); \
	ipc_log_string(drv->ipc_log_ctx, \
		"irq response: m=%d addr=0x%x err=%d", m, a, e); \
} while (0)

#define log_rpmh_control_msg(drv, d) do { \
	trace_rpmh_control_msg(drv->name, d); \
	ipc_log_string(drv->ipc_log_ctx, "ctrlr msg: data=0x%x", d); \
} while (0)

#define log_rpmh_notify(drv, m, a, e) do { \
	trace_rpmh_notify(drv->name, m, a, e); \
	ipc_log_string(drv->ipc_log_ctx, \
		"tx done: m=%d addr=0x%x err=%d", m, a, e); \
} while (0)

static int tcs_response_pool_init(struct rsc_drv *drv)
{
	struct tcs_response_pool *pool;
	int i;

	pool = devm_kzalloc(&drv->pdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	for (i = 0; i < MAX_POOL_SIZE; i++) {
		pool->resp[i].drv = drv;
		pool->resp[i].idx = i;
		pool->resp[i].m = TCS_M_INIT;
		INIT_LIST_HEAD(&pool->resp[i].list);
	}
	spin_lock_init(&pool->lock);
	drv->resp_pool = pool;

	return 0;
}

static struct tcs_response *setup_response(struct rsc_drv *drv,
		struct tcs_mbox_msg *msg, struct mbox_chan *chan,
		u32 m, int err)
{
	struct tcs_response_pool *pool = drv->resp_pool;
	struct tcs_response *resp = ERR_PTR(-ENOMEM);
	int pos;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
	if (pos != MAX_POOL_SIZE) {
		bitmap_set(pool->avail, pos, 1);
		resp = &pool->resp[pos];
		resp->chan = chan;
		resp->msg = msg;
		resp->m = m;
		resp->err = err;
		resp->in_use = false;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	if (pos == MAX_POOL_SIZE)
		pr_err("response pool is full\n");

	return resp;
}

static void free_response(struct tcs_response *resp)
{
	struct tcs_response_pool *pool = resp->drv->resp_pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	resp->err = -EINVAL;
	bitmap_clear(pool->avail, resp->idx, 1);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static inline struct tcs_response *get_response(struct rsc_drv *drv, u32 m,
		bool for_use)
{
	struct tcs_response_pool *pool = drv->resp_pool;
	struct tcs_response *resp = NULL;
	int pos = 0;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	do {
		pos = find_next_bit(pool->avail, MAX_POOL_SIZE, pos);
		if (pos == MAX_POOL_SIZE)
			break;

		resp = &pool->resp[pos];
		if (resp->m == m && !resp->in_use) {
			resp->in_use = for_use;
			break;
		}
		/* Not a match; reset so we don't return a stale entry */
		resp = NULL;
		pos++;
	} while (1);
	spin_unlock_irqrestore(&pool->lock, flags);

	return resp;
}

static void print_response(struct rsc_drv *drv, int m)
{
	struct tcs_response *resp;
	struct tcs_mbox_msg *msg;
	int i;

	resp = get_response(drv, m, false);
	if (!resp)
		return;

	msg = resp->msg;
	pr_warn("Response object [idx=%d for-tcs=%d in-use=%d]\n",
			resp->idx, resp->m, resp->in_use);
	pr_warn("Msg: state=%d\n", msg->state);
	for (i = 0; i < msg->num_payload; i++)
		pr_warn("addr=0x%x data=0x%x complete=0x%x\n",
				msg->payload[i].addr,
				msg->payload[i].data,
				msg->payload[i].complete);
}

static inline u32 read_drv_config(void __iomem *base)
{
	return le32_to_cpu(readl_relaxed(base + DRV_PRNT_CHLD_CONFIG));
}

static inline u32 read_tcs_reg(void __iomem *base, int reg, int m, int n)
{
	return le32_to_cpu(readl_relaxed(base + reg +
			RSC_DRV_TCS_OFFSET * m + RSC_DRV_CMD_OFFSET * n));
}

static inline void write_tcs_reg(void __iomem *base, int reg, int m, int n,
		u32 data)
{
	writel_relaxed(cpu_to_le32(data), base + reg +
			RSC_DRV_TCS_OFFSET * m + RSC_DRV_CMD_OFFSET * n);
}

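/*
 * Write and read back until the value sticks. The relaxed write is posted
 * and, presumably, the hardware may not latch it while the TCS is busy,
 * so this spins with a 1us delay between attempts.
 */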
static inline void write_tcs_reg_sync(void __iomem *base, int reg, int m, int n,
		u32 data)
{
	do {
		write_tcs_reg(base, reg, m, n, data);
		if (data == read_tcs_reg(base, reg, m, n))
			break;
		udelay(1);
	} while (1);
}

static inline bool tcs_is_free(struct rsc_drv *drv, int m)
{
	void __iomem *base = drv->reg_base;

	return read_tcs_reg(base, RSC_DRV_STATUS, m, 0) &&
			!atomic_read(&drv->tcs_in_use[m]);
}

static inline struct tcs_mbox *get_tcs_from_index(struct rsc_drv *drv, int m)
{
	struct tcs_mbox *tcs = NULL;
	int i;

	for (i = 0; i < drv->num_tcs; i++) {
		tcs = &drv->tcs[i];
		if (tcs->tcs_mask & (u32)BIT(m))
			break;
	}

	if (i == drv->num_tcs) {
		WARN(1, "Incorrect TCS index %d", m);
		tcs = NULL;
	}

	return tcs;
}

static inline struct tcs_mbox *get_tcs_of_type(struct rsc_drv *drv, int type)
{
	int i;
	struct tcs_mbox *tcs;

	for (i = 0; i < TCS_TYPE_NR; i++)
		if (type == drv->tcs[i].type)
			break;

	if (i == TCS_TYPE_NR)
		return ERR_PTR(-EINVAL);

	tcs = &drv->tcs[i];
	if (!tcs->num_tcs)
		return ERR_PTR(-EINVAL);

	return tcs;
}

static inline struct tcs_mbox *get_tcs_for_msg(struct rsc_drv *drv,
		struct tcs_mbox_msg *msg)
{
	int type = -1;

	/* Which box are we dropping this in and do we trigger the TCS */
	switch (msg->state) {
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_AWAKE_STATE:
		/*
		 * Awake state is only used when the DRV has no separate
		 * TCS for ACTIVE requests. Switch to WAKE TCS to send
		 * active votes. Otherwise, the caller should be explicit
		 * about the state.
		 */
		if (IS_ERR(get_tcs_of_type(drv, ACTIVE_TCS)))
			type = WAKE_TCS;
		break;
	}

	if (msg->is_read)
		type = ACTIVE_TCS;

	if (type < 0)
		return ERR_PTR(-EINVAL);

	return get_tcs_of_type(drv, type);
}

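/*
 * Queue the response on the DRV's pending list and kick the tasklet; the
 * client's tx-done callback runs from tasklet context so the IRQ handler
 * stays short.
 */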
static inline void send_tcs_response(struct tcs_response *resp)
{
	struct rsc_drv *drv = resp->drv;
	unsigned long flags;

	spin_lock_irqsave(&drv->drv_lock, flags);
	INIT_LIST_HEAD(&resp->list);
	list_add_tail(&resp->list, &drv->response_pending);
	spin_unlock_irqrestore(&drv->drv_lock, flags);

	tasklet_schedule(&drv->tasklet);
}

static inline void enable_tcs_irq(struct rsc_drv *drv, int m, bool enable)
{
	void __iomem *base = drv->reg_base;
	u32 data;

	/* Enable interrupts for non-ACTIVE TCS */
	data = read_tcs_reg(base, RSC_DRV_IRQ_ENABLE, 0, 0);
	if (enable)
		data |= BIT(m);
	else
		data &= ~BIT(m);
	write_tcs_reg(base, RSC_DRV_IRQ_ENABLE, 0, 0, data);
}

/**
 * tcs_irq_handler: TX Done / Recv data handler
 */
static irqreturn_t tcs_irq_handler(int irq, void *p)
{
	struct rsc_drv *drv = p;
	void __iomem *base = drv->reg_base;
	int m, i;
	u32 irq_status, sts;
	struct tcs_mbox *tcs;
	struct tcs_response *resp;
	struct tcs_cmd *cmd;
	u32 data;

	/* Know which TCSes were triggered */
	irq_status = read_tcs_reg(base, RSC_DRV_IRQ_STATUS, 0, 0);

	for (m = 0; m < drv->num_tcs; m++) {
		if (!(irq_status & (u32)BIT(m)))
			continue;
		atomic_inc(&drv->tcs_irq_count[m]);

		resp = get_response(drv, m, true);
		if (!resp) {
			pr_err("No resp request for TCS-%d\n", m);
			goto no_resp;
		}

		/* Check if all commands were completed */
		resp->err = 0;
		for (i = 0; i < resp->msg->num_payload; i++) {
			cmd = &resp->msg->payload[i];
			sts = read_tcs_reg(base, RSC_DRV_CMD_STATUS, m, i);
			if ((!(sts & CMD_STATUS_ISSUED)) ||
				((resp->msg->is_complete || cmd->complete) &&
				(!(sts & CMD_STATUS_COMPL)))) {
				resp->err = -EIO;
				break;
			}
		}

		/* Check for response if this was a read request */
		if (resp->msg->is_read) {
			/* Return the read value in the request's payload */
			data = read_tcs_reg(base, RSC_DRV_CMD_RESP_DATA, m, 0);
			resp->msg->payload[0].data = data;
			mbox_chan_received_data(resp->chan, resp->msg);
		}

		log_rpmh_notify_irq(drv, m, resp->msg->payload[0].addr,
						resp->err);

		/* Clear the AMC mode for non-ACTIVE TCSes */
		tcs = get_tcs_from_index(drv, m);
		if (tcs && tcs->type != ACTIVE_TCS) {
			data = read_tcs_reg(base, RSC_DRV_CONTROL, m, 0);
			data &= ~TCS_AMC_MODE_TRIGGER;
			write_tcs_reg_sync(base, RSC_DRV_CONTROL, m, 0, data);
			data &= ~TCS_AMC_MODE_ENABLE;
			write_tcs_reg(base, RSC_DRV_CONTROL, m, 0, data);
			/*
			 * Disable interrupt for this TCS to avoid being
			 * spammed with interrupts coming when the solver
			 * sends its wake votes.
			 */
			enable_tcs_irq(drv, m, false);
		} else {
			/* Clear the enable bit for the commands */
			write_tcs_reg(base, RSC_DRV_CMD_ENABLE, m, 0, 0);
		}

no_resp:
		/* Record the recvd time stamp */
		drv->tcs_last_recv_ts[m] = arch_counter_get_cntvct();

		/* Clear the TCS IRQ status */
		write_tcs_reg(base, RSC_DRV_IRQ_CLEAR, 0, 0, BIT(m));

		/* Mark the TCS free; the client is notified from the tasklet */
		atomic_set(&drv->tcs_in_use[m], 0);

		/* Clean up response object and notify mbox in tasklet */
		if (resp)
			send_tcs_response(resp);
	}

	return IRQ_HANDLED;
}

static inline void mbox_notify_tx_done(struct mbox_chan *chan,
		struct tcs_mbox_msg *msg, int m, int err)
{
	struct rsc_drv *drv = container_of(chan->mbox, struct rsc_drv, mbox);

	log_rpmh_notify(drv, m, msg->payload[0].addr, err);
	mbox_chan_txdone(chan, err);
}

static void respond_tx_done(struct tcs_response *resp)
{
	struct mbox_chan *chan = resp->chan;
	struct tcs_mbox_msg *msg = resp->msg;
	int err = resp->err;
	int m = resp->m;

	free_response(resp);
	mbox_notify_tx_done(chan, msg, m, err);
}

/**
 * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
 */
static void tcs_notify_tx_done(unsigned long data)
{
	struct rsc_drv *drv = (struct rsc_drv *)data;
	struct tcs_response *resp;
	unsigned long flags;

	do {
		spin_lock_irqsave(&drv->drv_lock, flags);
		if (list_empty(&drv->response_pending)) {
			spin_unlock_irqrestore(&drv->drv_lock, flags);
			break;
		}
		resp = list_first_entry(&drv->response_pending,
				struct tcs_response, list);
		list_del(&resp->list);
		spin_unlock_irqrestore(&drv->drv_lock, flags);
		respond_tx_done(resp);
	} while (1);
}

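/*
 * Program a message into TCS 'm' starting at command slot 'n': one
 * MSGID/ADDR/DATA register triple per command, then the batch's
 * send-after-prev completion bits, then the CMD_ENABLE bits. If @trigger
 * is set, the AMC mode enable/trigger bits are pulsed in the order the
 * hardware requires (clear trigger, clear enable, set enable, set trigger).
 */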
static void __tcs_buffer_write(struct rsc_drv *drv, int d, int m, int n,
		struct tcs_mbox_msg *msg, bool trigger)
{
	u32 msgid, cmd_msgid = 0;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	u32 enable;
	struct tcs_cmd *cmd;
	int i;
	void __iomem *base = drv->reg_base;

	/* The command set is homogeneous, i.e. pure read or write, not a mix */
	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= (msg->is_complete) ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= (!msg->is_read) ? CMD_MSGID_WRITE : 0;

	/* Read the send-after-prev complete flag for those already in TCS */
	cmd_complete = read_tcs_reg(base, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);

	for (i = 0; i < msg->num_payload; i++) {
		cmd = &msg->payload[i];
		cmd_enable |= BIT(n + i);
		cmd_complete |= cmd->complete << (n + i);
		msgid = cmd_msgid;
		msgid |= (cmd->complete) ? CMD_MSGID_RESP_REQ : 0;
		write_tcs_reg(base, RSC_DRV_CMD_MSGID, m, n + i, msgid);
		write_tcs_reg(base, RSC_DRV_CMD_ADDR, m, n + i, cmd->addr);
		write_tcs_reg(base, RSC_DRV_CMD_DATA, m, n + i, cmd->data);
		log_send_msg(drv, m, n + i, msgid, cmd->addr,
				cmd->data, cmd->complete, trigger);
	}

	/* Write the send-after-prev completion bits for the batch */
	write_tcs_reg(base, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0, cmd_complete);

	/* Enable the new commands in TCS */
	cmd_enable |= read_tcs_reg(base, RSC_DRV_CMD_ENABLE, m, 0);
	write_tcs_reg(base, RSC_DRV_CMD_ENABLE, m, 0, cmd_enable);

	if (trigger) {
		/*
		 * HW req: Clear the DRV_CONTROL and enable TCS again.
		 * While clearing, ensure that the AMC mode trigger is
		 * cleared first and then the mode enable is cleared.
		 */
		enable = read_tcs_reg(base, RSC_DRV_CONTROL, m, 0);
		enable &= ~TCS_AMC_MODE_TRIGGER;
		write_tcs_reg_sync(base, RSC_DRV_CONTROL, m, 0, enable);
		enable &= ~TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(base, RSC_DRV_CONTROL, m, 0, enable);

		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(base, RSC_DRV_CONTROL, m, 0, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg(base, RSC_DRV_CONTROL, m, 0, enable);
	}
}

/**
 * rsc_drv_is_idle: Check if any of the AMCs are busy.
 *
 * @mbox: The mailbox controller.
 *
 * Returns true if the AMCs are not engaged or absent.
 */
static bool rsc_drv_is_idle(struct mbox_controller *mbox)
{
	int m;
	struct rsc_drv *drv = container_of(mbox, struct rsc_drv, mbox);
	struct tcs_mbox *tcs = get_tcs_of_type(drv, ACTIVE_TCS);

	/* Check for WAKE TCS if there are no ACTIVE TCS */
	if (IS_ERR(tcs))
		tcs = get_tcs_of_type(drv, WAKE_TCS);

	for (m = tcs->tcs_offset; m < tcs->tcs_offset + tcs->num_tcs; m++)
		if (!tcs_is_free(drv, m))
			return false;

	return true;
}

static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_mbox *tcs,
		struct tcs_mbox_msg *msg)
{
	u32 curr_enabled, addr;
	int i, j, k;
	void __iomem *base = drv->reg_base;
	int m = tcs->tcs_offset;

	for (i = 0; i < tcs->num_tcs; i++, m++) {
		if (tcs_is_free(drv, m))
			continue;

		curr_enabled = read_tcs_reg(base, RSC_DRV_CMD_ENABLE, m, 0);
		for (j = 0; j < MAX_CMDS_PER_TCS; j++) {
			if (!(curr_enabled & (u32)BIT(j)))
				continue;
			addr = read_tcs_reg(base, RSC_DRV_CMD_ADDR, m, j);
			for (k = 0; k < msg->num_payload; k++) {
				if (addr == msg->payload[k].addr)
					return -EBUSY;
			}
		}
	}

	return 0;
}

static int find_free_tcs(struct tcs_mbox *tcs)
{
	int slot = -EBUSY;
	int m = 0;

	/* Loop until we find a free AMC */
	for (m = 0; m < tcs->num_tcs; m++) {
		if (tcs_is_free(tcs->drv, tcs->tcs_offset + m)) {
			slot = m * tcs->ncpt;
			break;
		}
	}

	return slot;
}

static int find_match(struct tcs_mbox *tcs, struct tcs_cmd *cmd, int len)
{
	bool found = false;
	int i = 0, j;

	/* Check for already cached commands */
	while ((i = find_next_bit(tcs->slots, MAX_TCS_SLOTS, i)) <
			MAX_TCS_SLOTS) {
		if (tcs->cmd_addr[i] != cmd[0].addr) {
			i++;
			continue;
		}
		/* Sanity check to ensure the rest of the sequence matches */
		for (j = 1; j < len; j++) {
			if (tcs->cmd_addr[i + j] != cmd[j].addr) {
				WARN(1, "Message does not match previous sequence.\n");
				return -EINVAL;
			}
		}
		found = true;
		break;
	}

	return found ? i : -1;
}

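/*
 * Pick the starting command slot for @msg: active/awake requests take the
 * first idle TCS; sleep/wake requests first try to reuse a previously
 * cached sequence, then fall back to a contiguous run of free slots that
 * does not straddle a TCS boundary.
 */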
static int find_slots(struct tcs_mbox *tcs, struct tcs_mbox_msg *msg)
{
	int slot;
	int n = 0;

	/* For active requests find the first free AMC */
	if (msg->state == RPMH_ACTIVE_ONLY_STATE ||
			msg->state == RPMH_AWAKE_STATE)
		return find_free_tcs(tcs);

	/* Find if we already have the msg in our TCS */
	slot = find_match(tcs, msg->payload, msg->num_payload);
	if (slot >= 0)
		return slot;

	/* Retry, TCS by TCS, until the full payload fits within one TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
				n, msg->num_payload, 0);
		if (slot == MAX_TCS_SLOTS)
			break;
		n += tcs->ncpt;
	} while (slot + msg->num_payload - 1 >= n);

	return (slot != MAX_TCS_SLOTS) ? slot : -ENOMEM;
}

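/*
 * Write path shared by triggered (active) and untriggered (sleep/wake)
 * requests. The tcs_lock serializes slot selection and the register
 * writes; for triggered sends, a response object is reserved up front and
 * the TCS is marked in-use until the completion IRQ releases it.
 */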
static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
		bool trigger)
{
	struct rsc_drv *drv = container_of(chan->mbox, struct rsc_drv, mbox);
	int d = drv->drv_id;
	struct tcs_mbox *tcs;
	int i, slot, offset, m, n, ret;
	struct tcs_response *resp = NULL;
	unsigned long flags;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	if (trigger) {
		resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);
		if (IS_ERR_OR_NULL(resp))
			return -EBUSY;
	}

	/* Identify the sequential slots that we can write to */
	spin_lock_irqsave(&tcs->tcs_lock, flags);
	slot = find_slots(tcs, msg);
	if (slot < 0) {
		spin_unlock_irqrestore(&tcs->tcs_lock, flags);
		if (resp)
			free_response(resp);
		return slot;
	}

	/* Figure out the TCS-m and CMD-n to write to */
	offset = slot / tcs->ncpt;
	m = offset + tcs->tcs_offset;
	n = slot % tcs->ncpt;

	if (trigger) {
		/* Block, if we have an address from the msg in flight */
		ret = check_for_req_inflight(drv, tcs, msg);
		if (ret) {
			spin_unlock_irqrestore(&tcs->tcs_lock, flags);
			if (resp)
				free_response(resp);
			return ret;
		}

		resp->m = m;
		/* Mark the TCS as busy */
		atomic_set(&drv->tcs_in_use[m], 1);
		atomic_inc(&drv->tcs_send_count[m]);
		/* Enable interrupt for active votes through wake TCS */
		if (tcs->type != ACTIVE_TCS)
			enable_tcs_irq(drv, m, true);
		drv->tcs_last_sent_ts[m] = arch_counter_get_cntvct();
	} else {
		/* Mark the slots as in-use, before we unlock */
		if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
			bitmap_set(tcs->slots, slot, msg->num_payload);

		/* Copy the addresses of the resources over to the slots */
		for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
			tcs->cmd_addr[slot + i] = msg->payload[i].addr;
	}

	/* Write to the TCS or AMC */
	__tcs_buffer_write(drv, d, m, n, msg, trigger);
	spin_unlock_irqrestore(&tcs->tcs_lock, flags);

	return 0;
}

static void __tcs_buffer_invalidate(void __iomem *base, int m)
{
	write_tcs_reg(base, RSC_DRV_CMD_ENABLE, m, 0, 0);
}

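/*
 * Invalidation clears the CMD_ENABLE bits of every wake and sleep TCS and
 * resets their slot bitmaps; it fails with -EBUSY if any TCS of either
 * type is still in flight.
 */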
static int tcs_mbox_invalidate(struct mbox_chan *chan)
{
	struct rsc_drv *drv = container_of(chan->mbox, struct rsc_drv, mbox);
	struct tcs_mbox *tcs;
	int m, i;
	int inv_types[] = { WAKE_TCS, SLEEP_TCS };
	int type = 0;
	unsigned long flags;

	do {
		tcs = get_tcs_of_type(drv, inv_types[type]);
		if (IS_ERR(tcs))
			return PTR_ERR(tcs);

		spin_lock_irqsave(&tcs->tcs_lock, flags);
		for (i = 0; i < tcs->num_tcs; i++) {
			m = i + tcs->tcs_offset;
			if (!tcs_is_free(drv, m)) {
				spin_unlock_irqrestore(&tcs->tcs_lock, flags);
				return -EBUSY;
			}
			__tcs_buffer_invalidate(drv->reg_base, m);
		}
		/* Mark the TCS as free */
		bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
		spin_unlock_irqrestore(&tcs->tcs_lock, flags);
	} while (++type < ARRAY_SIZE(inv_types));

	return 0;
}

static void print_tcs_regs(struct rsc_drv *drv, int m)
{
	int n;
	struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
	void __iomem *base = drv->reg_base;
	u32 enable, addr, data, msgid, sts, irq_sts;

	if (!tcs || tcs_is_free(drv, m))
		return;

	enable = read_tcs_reg(base, RSC_DRV_CMD_ENABLE, m, 0);
	if (!enable)
		return;

	pr_warn("RSC:%s\n", drv->name);

	sts = read_tcs_reg(base, RSC_DRV_STATUS, m, 0);
	data = read_tcs_reg(base, RSC_DRV_CONTROL, m, 0);
	irq_sts = read_tcs_reg(base, RSC_DRV_IRQ_STATUS, 0, 0);
	pr_warn("TCS=%d [ctrlr-sts:%s amc-mode:0x%x irq-sts:%s]\n",
			m, sts ? "IDLE" : "BUSY", data,
			(irq_sts & BIT(m)) ? "COMPLETED" : "PENDING");

	for (n = 0; n < tcs->ncpt; n++) {
		if (!(enable & BIT(n)))
			continue;
		addr = read_tcs_reg(base, RSC_DRV_CMD_ADDR, m, n);
		data = read_tcs_reg(base, RSC_DRV_CMD_DATA, m, n);
		msgid = read_tcs_reg(base, RSC_DRV_CMD_MSGID, m, n);
		sts = read_tcs_reg(base, RSC_DRV_CMD_STATUS, m, n);
		pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x]\n",
				n, addr, data, msgid, sts);
	}
}

static void dump_tcs_stats(struct rsc_drv *drv)
{
	int i;
	unsigned long long curr = arch_counter_get_cntvct();

	for (i = 0; i < drv->num_tcs; i++) {
		if (!atomic_read(&drv->tcs_in_use[i]))
			continue;
		pr_warn("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
				curr, i,
				atomic_read(&drv->tcs_send_count[i]),
				drv->tcs_last_sent_ts[i],
				atomic_read(&drv->tcs_irq_count[i]),
				drv->tcs_last_recv_ts[i]);
		print_tcs_regs(drv, i);
		print_response(drv, i);
	}
}

static void chan_debug(struct mbox_chan *chan)
{
	struct rsc_drv *drv = container_of(chan->mbox, struct rsc_drv, mbox);

	dump_tcs_stats(drv);
}

/**
 * chan_tcs_write: Validate the incoming message and write to the
 * appropriate TCS block.
 *
 * @chan: the MBOX channel
 * @data: the tcs_mbox_msg*
 *
 * Returns a negative error for an invalid message structure or an invalid
 * message combination, -EBUSY if there is another active request for the
 * channel in flight; otherwise bubbles up any internal error.
 */
static int chan_tcs_write(struct mbox_chan *chan, void *data)
{
	struct rsc_drv *drv = container_of(chan->mbox, struct rsc_drv, mbox);
	struct tcs_mbox_msg *msg = data;
	const struct device *dev = chan->cl->dev;
	int ret = 0;

	if (!msg) {
		dev_err(dev, "Payload error\n");
		ret = -EINVAL;
		goto tx_fail;
	}

	if (!msg->payload || !msg->num_payload ||
			msg->num_payload > MAX_RPMH_PAYLOAD) {
		dev_err(dev, "Payload error\n");
		ret = -EINVAL;
		goto tx_fail;
	}

	if (msg->invalidate || msg->is_control) {
		dev_err(dev, "Incorrect API\n");
		ret = -EINVAL;
		goto tx_fail;
	}

	if (msg->state != RPMH_ACTIVE_ONLY_STATE &&
			msg->state != RPMH_AWAKE_STATE) {
		dev_err(dev, "Incorrect API\n");
		ret = -EINVAL;
		goto tx_fail;
	}

	/* Read requests should always be single */
	if (msg->is_read && msg->num_payload > 1) {
		dev_err(dev, "Incorrect read request\n");
		ret = -EINVAL;
		goto tx_fail;
	}

	/*
	 * Since we are re-purposing the wake TCS, invalidate previous
	 * contents to avoid confusion.
	 */
	if (msg->state == RPMH_AWAKE_STATE) {
		ret = tcs_mbox_invalidate(chan);
		if (ret)
			goto tx_fail;
	}

	/* Post the message to the TCS and trigger */
	ret = tcs_mbox_write(chan, msg, true);

tx_fail:
	/* If there was an error in the request, schedule a response */
	if (ret < 0 && ret != -EBUSY) {
		struct tcs_response *resp = setup_response(
				drv, msg, chan, TCS_M_INIT, ret);

		dev_err(dev, "Error sending RPMH message %d\n", ret);
		if (!IS_ERR(resp))
			send_tcs_response(resp);
		else
			dev_err(dev, "No response object %ld\n", PTR_ERR(resp));
		ret = 0;
	}

	/* If the TCS was busy, ask the framework to retry the send */
	if (ret == -EBUSY) {
		pr_info_ratelimited("TCS Busy, retrying RPMH message send\n");
		ret = -EAGAIN;
	}

	return ret;
}

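/*
 * Control (hidden) TCS writes: only the DATA register of each hidden
 * command slot is writable; slots sit TCS_HIDDEN_CMD_SHIFT bytes apart,
 * starting at TCS_HIDDEN_CMD0_DRV_DATA in the RSC register space.
 */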
static void __tcs_write_hidden(struct rsc_drv *drv, int d,
		struct tcs_mbox_msg *msg)
{
	int i;
	void __iomem *addr = drv->base + TCS_HIDDEN_CMD0_DRV_DATA;

	for (i = 0; i < msg->num_payload; i++) {
		/* Only data is write capable */
		writel_relaxed(cpu_to_le32(msg->payload[i].data), addr);
		log_rpmh_control_msg(drv, msg->payload[i].data);
		addr += TCS_HIDDEN_CMD_SHIFT;
	}
}

static int tcs_control_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg)
{
	const struct device *dev = chan->cl->dev;
	struct rsc_drv *drv = container_of(chan->mbox, struct rsc_drv, mbox);
	struct tcs_mbox *tcs;
	unsigned long flags;

	tcs = get_tcs_of_type(drv, CONTROL_TCS);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	if (msg->num_payload != tcs->ncpt) {
		dev_err(dev, "Request must fit the control TCS size\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&tcs->tcs_lock, flags);
	__tcs_write_hidden(tcs->drv, drv->drv_id, msg);
	spin_unlock_irqrestore(&tcs->tcs_lock, flags);

	return 0;
}

/**
 * chan_tcs_ctrl_write: Write message to the controller, no ACK sent.
 *
 * @chan: the MBOX channel
 * @data: the tcs_mbox_msg*
 */
static int chan_tcs_ctrl_write(struct mbox_chan *chan, void *data)
{
	struct tcs_mbox_msg *msg = data;
	const struct device *dev = chan->cl->dev;
	int ret = -EINVAL;

	if (!msg) {
		dev_err(dev, "Payload error\n");
		goto tx_done;
	}

	if (!msg->payload || (!msg->num_payload && !msg->invalidate) ||
			msg->num_payload > MAX_RPMH_PAYLOAD) {
		dev_err(dev, "Payload error\n");
		goto tx_done;
	}

	/* Invalidate sleep/wake TCS */
	if (msg->invalidate) {
		ret = tcs_mbox_invalidate(chan);
		goto tx_done;
	}

	/* Control slots are unique. They carry specific data. */
	if (msg->is_control) {
		ret = tcs_control_write(chan, msg);
		goto tx_done;
	}

	/* Post the message to the TCS without trigger */
	ret = tcs_mbox_write(chan, msg, false);

tx_done:
	return ret;
}

static int chan_init(struct mbox_chan *chan)
{
	return 0;
}

static void chan_shutdown(struct mbox_chan *chan)
{ }

static const struct mbox_chan_ops mbox_ops = {
	.send_data = chan_tcs_write,
	.write_controller_data = chan_tcs_ctrl_write,
	.startup = chan_init,
	.shutdown = chan_shutdown,
};

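/*
 * Channels are handed out in request order; the phandle arguments are not
 * interpreted here. An illustrative client node (names are hypothetical,
 * see the qcom,tcs-mbox binding for the exact format) might look like:
 *
 *	client@0 {
 *		mboxes = <&apps_rsc 0>;
 *	};
 */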
static struct mbox_chan *of_tcs_mbox_xlate(struct mbox_controller *mbox,
		const struct of_phandle_args *sp)
{
	struct rsc_drv *drv = container_of(mbox, struct rsc_drv, mbox);
	struct mbox_chan *chan;

	if (drv->num_assigned >= mbox->num_chans) {
		pr_err("TCS-Mbox out of channel memory\n");
		return ERR_PTR(-ENOMEM);
	}

	chan = &mbox->chans[drv->num_assigned++];
	chan->con_priv = drv;

	return chan;
}

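/*
 * Probe: read the DRV id, map the RSC and DRV register regions, decode the
 * TCS count and commands-per-TCS from the config register, parse the
 * <type count> pairs in "qcom,tcs-config", count the DT clients that
 * reference this controller, then register the mailbox and its IRQ.
 */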
static int rsc_drv_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *np;
	struct rsc_drv *drv;
	struct mbox_chan *chans;
	struct tcs_mbox *tcs;
	struct of_phandle_args p;
	int irq;
	u32 val[8] = { 0 };
	int num_chans = 0;
	int st = 0;
	int i, j, ret, nelem;
	u32 config, max_tcs, ncpt;
	int tcs_type_count[TCS_TYPE_NR] = { 0 };
	struct resource *res;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->drv_id);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	drv->addr = res->start;
	drv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(drv->base))
		return PTR_ERR(drv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -EINVAL;
	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(drv->reg_base))
		return PTR_ERR(drv->reg_base);

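	/*
	 * The config register packs a per-DRV TCS count (shifted by
	 * DRV_NUM_TCS_SHIFT per DRV id) and a global commands-per-TCS
	 * (ncpt) field at DRV_NCPT_SHIFT.
	 */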
	config = read_drv_config(drv->base);
	max_tcs = config & (DRV_NUM_TCS_MASK <<
				(DRV_NUM_TCS_SHIFT * drv->drv_id));
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->drv_id);
	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	nelem = of_property_count_elems_of_size(dn, "qcom,tcs-config",
						sizeof(u32));
	if (!nelem || (nelem % 2) || (nelem > 2 * TCS_TYPE_NR))
		return -EINVAL;

	ret = of_property_read_u32_array(dn, "qcom,tcs-config", val, nelem);
	if (ret)
		return ret;

	/* Ensure we have no more than one entry of each type in DT */
	for (i = 0; i < (nelem / 2); i++) {
		if (val[2 * i] >= TCS_TYPE_NR)
			return -EINVAL;
		tcs_type_count[val[2 * i]]++;
		if (tcs_type_count[val[2 * i]] > 1)
			return -EINVAL;
	}

	/* Ensure we have each type specified in DT */
	for (i = 0; i < ARRAY_SIZE(tcs_type_count); i++)
		if (!tcs_type_count[i])
			return -EINVAL;

	for (i = 0; i < (nelem / 2); i++) {
		tcs = &drv->tcs[val[2 * i]];
		tcs->drv = drv;
		tcs->type = val[2 * i];
		tcs->num_tcs = val[2 * i + 1];
		tcs->ncpt = (tcs->type == CONTROL_TCS) ? TCS_HIDDEN_MAX_SLOTS
							: ncpt;
		spin_lock_init(&tcs->tcs_lock);

		if (tcs->num_tcs <= 0 || tcs->type == CONTROL_TCS)
			continue;

		if (tcs->num_tcs > MAX_TCS_PER_TYPE ||
			st + tcs->num_tcs > max_tcs ||
			st + tcs->num_tcs >=
				BITS_PER_BYTE * sizeof(tcs->tcs_mask))
			return -EINVAL;

		tcs->tcs_mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->tcs_offset = st;
		st += tcs->num_tcs;

		tcs->cmd_addr = devm_kzalloc(&pdev->dev, sizeof(u32) *
					tcs->num_tcs * tcs->ncpt, GFP_KERNEL);
		if (!tcs->cmd_addr)
			return -ENOMEM;
	}

	/* Allocate only as many channels as DT clients reference this MBOX */
	for_each_node_with_property(np, "mboxes") {
		if (!of_device_is_available(np))
			continue;
		i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
		for (j = 0; j < i; j++) {
			ret = of_parse_phandle_with_args(np, "mboxes",
							"#mbox-cells", j, &p);
			of_node_put(p.np);
			if (!ret && p.np == pdev->dev.of_node) {
				num_chans++;
				break;
			}
		}
	}

	if (!num_chans) {
		pr_err("%s: No clients for controller (%s)\n", __func__,
			dn->full_name);
		return -ENODEV;
	}

	chans = devm_kzalloc(&pdev->dev, num_chans * sizeof(*chans),
				GFP_KERNEL);
	if (!chans)
		return -ENOMEM;

	for (i = 0; i < num_chans; i++) {
		chans[i].mbox = &drv->mbox;
		chans[i].txdone_method = TXDONE_BY_IRQ;
	}

	drv->mbox.dev = &pdev->dev;
	drv->mbox.ops = &mbox_ops;
	drv->mbox.chans = chans;
	drv->mbox.num_chans = num_chans;
	drv->mbox.txdone_irq = true;
	drv->mbox.of_xlate = of_tcs_mbox_xlate;
	drv->mbox.is_idle = rsc_drv_is_idle;
	drv->mbox.debug = chan_debug;
	drv->num_tcs = st;
	drv->pdev = pdev;
	INIT_LIST_HEAD(&drv->response_pending);
	spin_lock_init(&drv->drv_lock);
	tasklet_init(&drv->tasklet, tcs_notify_tx_done, (unsigned long)drv);

	drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	ret = tcs_response_pool_init(drv);
	if (ret)
		return ret;

	irq = of_irq_get(dn, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_irq_handler,
			IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			drv->name, drv);
	if (ret)
		return ret;

	/* Enable interrupts for AMC TCS */
	write_tcs_reg(drv->reg_base, RSC_DRV_IRQ_ENABLE, 0, 0,
			drv->tcs[ACTIVE_TCS].tcs_mask);

	for (i = 0; i < ARRAY_SIZE(drv->tcs_in_use); i++)
		atomic_set(&drv->tcs_in_use[i], 0);

	drv->ipc_log_ctx = ipc_log_context_create(RSC_DRV_IPC_LOG_SIZE,
							drv->name, 0);

	ret = mbox_controller_register(&drv->mbox);
	if (ret)
		return ret;

	pr_debug("Mailbox controller (%s, drv=%d) registered\n",
			dn->full_name, drv->drv_id);

	return 0;
}

static const struct of_device_id rsc_drv_match[] = {
	{ .compatible = "qcom,tcs-drv", },
	{ }
};

static struct platform_driver rpmh_mbox_driver = {
	.probe = rsc_drv_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = rsc_drv_match,
	},
};

static int __init rpmh_mbox_driver_init(void)
{
	return platform_driver_register(&rpmh_mbox_driver);
}
arch_initcall(rpmh_mbox_driver_init);