rpm-smd.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156
  1. /* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #define pr_fmt(fmt) "%s: " fmt, __func__
  14. #include <linux/module.h>
  15. #include <linux/kernel.h>
  16. #include <linux/types.h>
  17. #include <linux/bug.h>
  18. #include <linux/completion.h>
  19. #include <linux/delay.h>
  20. #include <linux/init.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/io.h>
  23. #include <linux/irq.h>
  24. #include <linux/list.h>
  25. #include <linux/mutex.h>
  26. #include <linux/of_address.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/string.h>
  29. #include <linux/device.h>
  30. #include <linux/notifier.h>
  31. #include <linux/slab.h>
  32. #include <linux/platform_device.h>
  33. #include <linux/of.h>
  34. #include <linux/of_platform.h>
  35. #include <linux/rbtree.h>
  36. #include <soc/qcom/rpm-notifier.h>
  37. #include <soc/qcom/rpm-smd.h>
  38. #include <soc/qcom/smd.h>
  39. #include <soc/qcom/glink_rpm_xprt.h>
  40. #include <soc/qcom/glink.h>
  41. #define CREATE_TRACE_POINTS
  42. #include <trace/events/trace_rpm_smd.h>
  43. /* Debug Definitions */
/* Bitmask values accepted by the debug_mask module parameter below. */
enum {
	MSM_RPM_LOG_REQUEST_PRETTY = BIT(0),
	MSM_RPM_LOG_REQUEST_RAW = BIT(1),
	MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2),
};

static int msm_rpm_debug_mask;
module_param_named(
	debug_mask, msm_rpm_debug_mask, int, 0644
);

/* Per-channel state for the legacy SMD transport to the RPM. */
struct msm_rpm_driver_data {
	const char *ch_name;
	uint32_t ch_type;
	smd_channel_t *ch_info;
	struct work_struct work;
	spinlock_t smd_lock_write;	/* serializes writers on the channel */
	spinlock_t smd_lock_read;	/* serializes readers on the channel */
	struct completion smd_open;	/* signalled once the channel is up */
};

/* State for the G-Link transport, used when glink_enabled is set. */
struct glink_apps_rpm_data {
	const char *name;
	const char *edge;
	const char *xprt;
	void *glink_handle;
	struct glink_link_info *link_info;
	struct glink_open_config *open_cfg;
	struct work_struct work;
};
  71. static bool glink_enabled;
  72. static struct glink_apps_rpm_data *glink_data;
  73. #define DEFAULT_BUFFER_SIZE 256
  74. #define DEBUG_PRINT_BUFFER_SIZE 512
  75. #define MAX_SLEEP_BUFFER 128
  76. #define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_NOIO)
  77. #define INV_RSC "resource does not exist"
  78. #define ERR "err\0"
  79. #define MAX_ERR_BUFFER_SIZE 128
  80. #define MAX_WAIT_ON_ACK 24
  81. #define INIT_ERROR 1
  82. #define V1_PROTOCOL_VERSION 0x31726576 /* rev1 */
  83. #define V0_PROTOCOL_VERSION 0 /* rev0 */
  84. #define RPM_MSG_TYPE_OFFSET 16
  85. #define RPM_MSG_TYPE_SIZE 8
  86. #define RPM_SET_TYPE_OFFSET 28
  87. #define RPM_SET_TYPE_SIZE 4
  88. #define RPM_REQ_LEN_OFFSET 0
  89. #define RPM_REQ_LEN_SIZE 16
  90. #define RPM_MSG_VERSION_OFFSET 24
  91. #define RPM_MSG_VERSION_SIZE 8
  92. #define RPM_MSG_VERSION 1
  93. #define RPM_MSG_SET_OFFSET 28
  94. #define RPM_MSG_SET_SIZE 4
  95. #define RPM_RSC_ID_OFFSET 16
  96. #define RPM_RSC_ID_SIZE 12
  97. #define RPM_DATA_LEN_OFFSET 0
  98. #define RPM_DATA_LEN_SIZE 16
  99. #define RPM_HDR_SIZE ((rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ?\
  100. sizeof(struct rpm_v0_hdr) : sizeof(struct rpm_v1_hdr))
  101. #define CLEAR_FIELD(offset, size) (~GENMASK(offset + size - 1, offset))
  102. static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
  103. static bool standalone;
  104. static int probe_status = -EPROBE_DEFER;
  105. static int msm_rpm_read_smd_data(char *buf);
  106. static void msm_rpm_process_ack(uint32_t msg_id, int errno);
/**
 * msm_rpm_register_notifier() - register for sleep-set notifications.
 * @nb: notifier block to add.
 *
 * Callbacks on this chain are invoked from msm_rpm_notify_sleep_chain()
 * with a struct msm_rpm_notifier_data argument.
 */
int msm_rpm_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
}

/**
 * msm_rpm_unregister_notifier() - remove a previously registered notifier.
 * @nb: notifier block to remove.
 */
int msm_rpm_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
}
/* Message types carried by the legacy (v0 format) request service. */
enum {
	MSM_RPM_MSG_REQUEST_TYPE = 0,
	MSM_RPM_MSG_TYPE_NR,
};

/*
 * v0-format service identifiers, indexed by message type; used by
 * set_msg_type() when rpm_msg_fmt_ver == RPM_MSG_V0_FMT.
 */
static const uint32_t msm_rpm_request_service_v1[MSM_RPM_MSG_TYPE_NR] = {
	0x716572, /* 'req\0' */
};

/* Service codes packed into the v1 header's message-type field. */
enum {
	RPM_V1_REQUEST_SERVICE,
	RPM_V1_SYSTEMDB_SERVICE,
	RPM_V1_COMMAND_SERVICE,
	RPM_V1_ACK_SERVICE,
	RPM_V1_NACK_SERVICE,
} msm_rpm_request_service_v2;
/* v0 wire header: explicit service-type and request-length words. */
struct rpm_v0_hdr {
	uint32_t service_type;
	uint32_t request_len;
};

/* v1 wire header: type, length and version packed into one word
 * (see the RPM_*_OFFSET / RPM_*_SIZE definitions above). */
struct rpm_v1_hdr {
	uint32_t request_hdr;
};

struct rpm_message_header_v0 {
	struct rpm_v0_hdr hdr;
	uint32_t msg_id;
	enum msm_rpm_set set;		/* active vs. sleep set */
	uint32_t resource_type;
	uint32_t resource_id;
	uint32_t data_len;
};

struct rpm_message_header_v1 {
	struct rpm_v1_hdr hdr;
	uint32_t msg_id;
	uint32_t resource_type;
	/* set type, resource id and data length, packed as bit-fields */
	uint32_t request_details;
};

/* Ack payload layouts, again format-dependent. */
struct msm_rpm_ack_msg_v0 {
	uint32_t req;
	uint32_t req_len;
	uint32_t rsc_id;
	uint32_t msg_len;
	uint32_t id_ack;
};

struct msm_rpm_ack_msg_v1 {
	uint32_t request_hdr;
	uint32_t id_ack;
};

/* Key/value pair header as carried inside a request buffer; the
 * value's s bytes follow immediately after this header. */
struct kvp {
	unsigned int k;
	unsigned int s;
};

/* Client-side copy of one key/value pair. */
struct msm_rpm_kvp_data {
	uint32_t key;
	uint32_t nbytes; /* number of bytes */
	uint8_t *value;
	bool valid;
};

/* Cached sleep-set request, kept in tr_root keyed by
 * (resource type, resource id); valid marks unsent changes. */
struct slp_buf {
	struct rb_node node;
	char ubuf[MAX_SLEEP_BUFFER];
	char *buf;	/* u32-aligned pointer into ubuf */
	bool valid;
};

/* Supported wire formats for messages exchanged with the RPM. */
enum rpm_msg_fmts {
	RPM_MSG_V0_FMT,
	RPM_MSG_V1_FMT
};
  181. static uint32_t rpm_msg_fmt_ver;
  182. module_param_named(
  183. rpm_msg_fmt_ver, rpm_msg_fmt_ver, uint, 0444
  184. );
  185. static struct rb_root tr_root = RB_ROOT;
  186. static int (*msm_rpm_send_buffer)(char *buf, uint32_t size, bool noirq);
  187. static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq);
  188. static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq);
  189. static uint32_t msm_rpm_get_next_msg_id(void);
  190. static inline uint32_t get_offset_value(uint32_t val, uint32_t offset,
  191. uint32_t size)
  192. {
  193. return (((val) & GENMASK(offset + size - 1, offset))
  194. >> offset);
  195. }
  196. static inline void change_offset_value(uint32_t *val, uint32_t offset,
  197. uint32_t size, int32_t val1)
  198. {
  199. uint32_t member = *val;
  200. uint32_t offset_val = get_offset_value(member, offset, size);
  201. uint32_t mask = (1 << size) - 1;
  202. offset_val += val1;
  203. *val &= CLEAR_FIELD(offset, size);
  204. *val |= ((offset_val & mask) << offset);
  205. }
  206. static inline void set_offset_value(uint32_t *val, uint32_t offset,
  207. uint32_t size, uint32_t val1)
  208. {
  209. uint32_t mask = (1 << size) - 1;
  210. *val &= CLEAR_FIELD(offset, size);
  211. *val |= ((val1 & mask) << offset);
  212. }
/* Read the message id from a request header, format-dependent. */
static uint32_t get_msg_id(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->msg_id;

	return ((struct rpm_message_header_v1 *)buf)->msg_id;
}

/* Read the message id echoed back in an ack payload. */
static uint32_t get_ack_msg_id(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct msm_rpm_ack_msg_v0 *)buf)->id_ack;

	return ((struct msm_rpm_ack_msg_v1 *)buf)->id_ack;
}

/* Read the resource type from a request header. */
static uint32_t get_rsc_type(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->resource_type;

	return ((struct rpm_message_header_v1 *)buf)->resource_type;
}

/* Read the set type (active/sleep); packed in request_details for v1. */
static uint32_t get_set_type(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->set;

	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_SET_TYPE_OFFSET,
			RPM_SET_TYPE_SIZE);
}

/* Read the payload (kvp data) length from a request header. */
static uint32_t get_data_len(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->data_len;

	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_DATA_LEN_OFFSET,
			RPM_DATA_LEN_SIZE);
}

/* Read the resource id from a request header. */
static uint32_t get_rsc_id(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->resource_id;

	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_RSC_ID_OFFSET,
			RPM_RSC_ID_SIZE);
}

/* Read the request length reported inside an ack payload. */
static uint32_t get_ack_req_len(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct msm_rpm_ack_msg_v0 *)buf)->req_len;

	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
			request_hdr, RPM_REQ_LEN_OFFSET,
			RPM_REQ_LEN_SIZE);
}

/* Read the service/message type from an ack payload. */
static uint32_t get_ack_msg_type(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct msm_rpm_ack_msg_v0 *)buf)->req;

	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
			request_hdr, RPM_MSG_TYPE_OFFSET,
			RPM_MSG_TYPE_SIZE);
}

/* Read the total request length (excluding wire header) of a request. */
static uint32_t get_req_len(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->hdr.request_len;

	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
			RPM_REQ_LEN_SIZE);
}
/*
 * Stamp the message version field in the wire header.
 *
 * NOTE(review): both branches write through the v1 header layout; for the
 * v0 format this touches bits 24-31 of service_type, which happen to be
 * zero for the 'req\0' service word — confirm this aliasing is intended.
 */
static void set_msg_ver(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver) {
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
			hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
			RPM_MSG_VERSION_SIZE, val);
	} else {
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
			hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
			RPM_MSG_VERSION_SIZE, 0);
	}
}

/* Set the request length in the wire header. */
static void set_req_len(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
		((struct rpm_message_header_v0 *)buf)->hdr.request_len = val;
	} else {
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
			RPM_REQ_LEN_SIZE, val);
	}
}

/* Adjust the request length by a signed delta. */
static void change_req_len(char *buf, int32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
		((struct rpm_message_header_v0 *)buf)->hdr.request_len += val;
	} else {
		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
			RPM_REQ_LEN_SIZE, val);
	}
}

/*
 * Set the service/message type. For v0, @val indexes the
 * msm_rpm_request_service_v1[] table; for v1 the request service code
 * is written directly (note: @val is ignored in the v1 branch).
 */
static void set_msg_type(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
		((struct rpm_message_header_v0 *)buf)->hdr.service_type =
			msm_rpm_request_service_v1[val];
	} else {
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
			hdr.request_hdr, RPM_MSG_TYPE_OFFSET,
			RPM_MSG_TYPE_SIZE, RPM_V1_REQUEST_SERVICE);
	}
}

/* Set the resource id field. */
static void set_rsc_id(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->resource_id = val;
	else
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_RSC_ID_OFFSET,
			RPM_RSC_ID_SIZE, val);
}

/* Set the payload (kvp data) length field. */
static void set_data_len(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->data_len = val;
	else
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_DATA_LEN_OFFSET,
			RPM_DATA_LEN_SIZE, val);
}

/* Adjust the payload length by a signed delta. */
static void change_data_len(char *buf, int32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->data_len += val;
	else
		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_DATA_LEN_OFFSET,
			RPM_DATA_LEN_SIZE, val);
}

/* Set the set type (active/sleep). */
static void set_set_type(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->set = val;
	else
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_SET_TYPE_OFFSET,
			RPM_SET_TYPE_SIZE, val);
}

/* Set the message id. */
static void set_msg_id(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->msg_id = val;
	else
		((struct rpm_message_header_v1 *)buf)->msg_id = val;
}

/* Set the resource type. */
static void set_rsc_type(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->resource_type = val;
	else
		((struct rpm_message_header_v1 *)buf)->resource_type = val;
}
/* Total on-wire size of a request: payload length plus header size. */
static inline int get_buf_len(char *buf)
{
	return get_req_len(buf) + RPM_HDR_SIZE;
}

/* First kvp in a request buffer: immediately after the format's header. */
static inline struct kvp *get_first_kvp(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return (struct kvp *)(buf +
			sizeof(struct rpm_message_header_v0));
	else
		return (struct kvp *)(buf +
			sizeof(struct rpm_message_header_v1));
}

/* Next kvp: skip this kvp's header plus its s value bytes. */
static inline struct kvp *get_next_kvp(struct kvp *k)
{
	return (struct kvp *)((void *)k + sizeof(*k) + k->s);
}

/* Value bytes of a kvp start right after its header. */
static inline void *get_data(struct kvp *k)
{
	return (void *)k + sizeof(*k);
}
  393. static void delete_kvp(char *buf, struct kvp *d)
  394. {
  395. struct kvp *n;
  396. int dec;
  397. uint32_t size;
  398. n = get_next_kvp(d);
  399. dec = (void *)n - (void *)d;
  400. size = get_data_len(buf) -
  401. ((void *)n - (void *)get_first_kvp(buf));
  402. memcpy((void *)d, (void *)n, size);
  403. change_data_len(buf, -dec);
  404. change_req_len(buf, -dec);
  405. }
/* Copy @src's value bytes over @dest's; callers ensure equal sizes. */
static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
{
	memcpy(get_data(dest), get_data(src), src->s);
}
  410. static void add_kvp(char *buf, struct kvp *n)
  411. {
  412. int32_t inc = sizeof(*n) + n->s;
  413. if (get_req_len(buf) + inc > MAX_SLEEP_BUFFER) {
  414. WARN_ON(get_req_len(buf) + inc > MAX_SLEEP_BUFFER);
  415. return;
  416. }
  417. memcpy(buf + get_buf_len(buf), n, inc);
  418. change_data_len(buf, inc);
  419. change_req_len(buf, inc);
  420. }
/*
 * tr_search() - find the cached sleep request matching @slp's
 * (resource type, resource id) key in the rb-tree, or NULL.
 */
static struct slp_buf *tr_search(struct rb_root *root, char *slp)
{
	unsigned int type = get_rsc_type(slp);
	unsigned int id = get_rsc_id(slp);
	struct rb_node *node = root->rb_node;

	/* Standard rb-tree walk ordered by type, then id. */
	while (node) {
		struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
		unsigned int ctype = get_rsc_type(cur->buf);
		unsigned int cid = get_rsc_id(cur->buf);

		if (type < ctype)
			node = node->rb_left;
		else if (type > ctype)
			node = node->rb_right;
		else if (id < cid)
			node = node->rb_left;
		else if (id > cid)
			node = node->rb_right;
		else
			return cur;
	}
	return NULL;
}

/*
 * tr_insert() - link @slp into the rb-tree keyed by
 * (resource type, resource id) and mark it pending (valid).
 *
 * Returns -EINVAL if an entry with the same key already exists.
 */
static int tr_insert(struct rb_root *root, struct slp_buf *slp)
{
	unsigned int type = get_rsc_type(slp->buf);
	unsigned int id = get_rsc_id(slp->buf);
	struct rb_node **node = &(root->rb_node), *parent = NULL;

	while (*node) {
		struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
		unsigned int ctype = get_rsc_type(curr->buf);
		unsigned int cid = get_rsc_id(curr->buf);

		parent = *node;

		if (type < ctype)
			node = &((*node)->rb_left);
		else if (type > ctype)
			node = &((*node)->rb_right);
		else if (id < cid)
			node = &((*node)->rb_left);
		else if (id > cid)
			node = &((*node)->rb_right);
		else
			return -EINVAL;
	}

	rb_link_node(&slp->node, parent, node);
	rb_insert_color(&slp->node, root);
	slp->valid = true;
	return 0;
}
/* Iterate the kvps in @buf: @k walks from the first kvp until the
 * header's data_len bytes have been consumed. */
#define for_each_kvp(buf, k) \
	for (k = (struct kvp *)get_first_kvp(buf); \
		((void *)k - (void *)get_first_kvp(buf)) < \
		get_data_len(buf);\
		k = get_next_kvp(k))

/*
 * tr_update() - merge the kvps of a new request @buf into cached sleep
 * entry @s. Matching keys with equal sizes are overwritten in place
 * (only if the bytes differ); size changes delete and re-append; new
 * keys are appended. Any change marks @s valid so it gets flushed.
 */
static void tr_update(struct slp_buf *s, char *buf)
{
	struct kvp *e, *n;

	for_each_kvp(buf, n) {
		bool found = false;

		for_each_kvp(s->buf, e) {
			if (n->k == e->k) {
				found = true;
				if (n->s == e->s) {
					void *e_data = get_data(e);
					void *n_data = get_data(n);

					/* only dirty the entry if the value changed */
					if (memcmp(e_data, n_data, n->s)) {
						update_kvp_data(e, n);
						s->valid = true;
					}
				} else {
					delete_kvp(s->buf, e);
					add_kvp(s->buf, n);
					s->valid = true;
				}
				break;
			}
		}
		if (!found) {
			add_kvp(s->buf, n);
			s->valid = true;
		}
	}
}
/* Monotonically increasing source for unique message ids. */
static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);

/* Client request handle: header staging buffer, per-key kvp copies,
 * and the flattened wire buffer (buf/numbytes) built at send time. */
struct msm_rpm_request {
	uint8_t *client_buf;
	struct msm_rpm_kvp_data *kvp;
	uint32_t num_elements;	/* capacity of kvp[] */
	uint32_t write_idx;	/* number of kvp[] slots in use */
	uint8_t *buf;
	uint32_t numbytes;
};

/*
 * Data related to message acknowledgment
 */

LIST_HEAD(msm_rpm_wait_list);

/* One sender blocked waiting for the ack matching msg_id. */
struct msm_rpm_wait_data {
	struct list_head list;
	uint32_t msg_id;
	bool ack_recd;
	int errno;
	struct completion ack;
	bool delete_on_ack;	/* reap entry in the ack path, not the sender */
};

/* Protects msm_rpm_wait_list. */
DEFINE_SPINLOCK(msm_rpm_list_lock);

LIST_HEAD(msm_rpm_ack_list);

static struct tasklet_struct data_tasklet;
/* Extract the acknowledged message id from an ack payload. */
static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
{
	return get_ack_msg_id(buf);
}

/*
 * Decode an ack/nack payload from the RPM.
 *
 * Returns 0 for a plain ack, the RPM-supplied error word for a v1 nack
 * (NOTE(review): returned through an int even though it is read as
 * uint32_t), and a negative errno when an error string is present.
 */
static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
{
	uint8_t *tmp;
	uint32_t req_len = get_ack_req_len(buf);
	uint32_t msg_type = get_ack_msg_type(buf);
	int rc = -ENODEV;
	uint32_t err;
	uint32_t ack_msg_size = rpm_msg_fmt_ver ?
			sizeof(struct msm_rpm_ack_msg_v1) :
			sizeof(struct msm_rpm_ack_msg_v0);

	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT &&
			msg_type == RPM_V1_ACK_SERVICE) {
		return 0;
	} else if (rpm_msg_fmt_ver && msg_type == RPM_V1_NACK_SERVICE) {
		/* v1 nack: error word immediately follows the ack header */
		err = *(uint32_t *)(buf + sizeof(struct msm_rpm_ack_msg_v1));
		return err;
	}

	/*
	 * NOTE(review): req_len is reduced by the header size then grown
	 * by 8, so the !req_len early-return only fires when the reported
	 * length is exactly header-size minus 8 — confirm this matches the
	 * v0 ack framing.
	 */
	req_len -= ack_msg_size;
	req_len += 2 * sizeof(uint32_t);
	if (!req_len)
		return 0;

	pr_err("%s:rpm returned error or nack req_len: %d id_ack: %d\n",
			__func__, req_len, get_ack_msg_id(buf));

	/* Error payload: "err\0" marker word, pad, then the reason text. */
	tmp = buf + ack_msg_size;

	if (memcmp(tmp, ERR, sizeof(uint32_t))) {
		pr_err("%s rpm returned error\n", __func__);
		WARN_ON(1);
	}

	tmp += 2 * sizeof(uint32_t);

	if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
			sizeof(INV_RSC))-1))) {
		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
		rc = -EINVAL;
	} else {
		pr_err("%s(): RPM NACK Invalid header\n", __func__);
	}

	return rc;
}
/**
 * msm_rpm_smd_buffer_request() - cache a sleep-set request for later flush.
 * @cdata: request handle whose flattened buffer (cdata->buf) is cached.
 * @size:  size in bytes of the flattened request.
 * @flag:  allocation flags (unused here; allocation below is GFP_ATOMIC
 *         because it happens under a spinlock).
 *
 * Sleep-set requests are not sent immediately: they are stored in the
 * tr_root rb-tree (new entries inserted, existing entries merged via
 * tr_update()) and sent later by msm_rpm_flush_requests().
 *
 * Returns 0 on success, -ENOMEM if the request is too large or memory
 * allocation fails.
 */
int msm_rpm_smd_buffer_request(struct msm_rpm_request *cdata,
		uint32_t size, gfp_t flag)
{
	struct slp_buf *slp;
	static DEFINE_SPINLOCK(slp_buffer_lock);
	unsigned long flags;
	char *buf;

	buf = cdata->buf;

	if (size > MAX_SLEEP_BUFFER)
		return -ENOMEM;

	spin_lock_irqsave(&slp_buffer_lock, flags);
	slp = tr_search(&tr_root, buf);

	if (!slp) {
		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
		if (!slp) {
			spin_unlock_irqrestore(&slp_buffer_lock, flags);
			return -ENOMEM;
		}
		/* keep the cached copy u32-aligned inside ubuf */
		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
		memcpy(slp->buf, buf, size);
		if (tr_insert(&tr_root, slp))
			pr_err("Error updating sleep request\n");
	} else {
		/* handle unsent requests */
		tr_update(slp, buf);
	}
	trace_rpm_smd_sleep_set(get_msg_id(cdata->client_buf),
			get_rsc_type(cdata->client_buf),
			get_req_len(cdata->client_buf));

	spin_unlock_irqrestore(&slp_buffer_lock, flags);

	return 0;
}
  601. static struct msm_rpm_driver_data msm_rpm_data = {
  602. .smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
  603. };
  604. static int msm_rpm_glink_rx_poll(void *glink_handle)
  605. {
  606. int ret;
  607. ret = glink_rpm_rx_poll(glink_handle);
  608. if (ret >= 0)
  609. /*
  610. * Sleep for 50us at a time before checking
  611. * for packet availability. The 50us is based
  612. * on the the time rpm could take to process
  613. * and send an ack for the sleep set request.
  614. */
  615. udelay(50);
  616. else
  617. pr_err("Not receieve an ACK from RPM. ret = %d\n", ret);
  618. return ret;
  619. }
  620. /*
  621. * Returns
  622. * = 0 on successful reads
  623. * > 0 on successful reads with no further data
  624. * standard Linux error codes on failure.
  625. */
/* Read one sleep-set ack from whichever transport is active; SMD reads
 * the packet and then reports whether more data is pending. */
static int msm_rpm_read_sleep_ack(void)
{
	int ret;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};

	if (glink_enabled)
		ret = msm_rpm_glink_rx_poll(glink_data->glink_handle);
	else {
		ret = msm_rpm_read_smd_data(buf);
		if (!ret)
			ret = smd_is_pkt_avail(msm_rpm_data.ch_info);
	}
	return ret;
}
  639. static int msm_rpm_flush_requests(bool print)
  640. {
  641. struct rb_node *t;
  642. int ret;
  643. int count = 0;
  644. for (t = rb_first(&tr_root); t; t = rb_next(t)) {
  645. struct slp_buf *s = rb_entry(t, struct slp_buf, node);
  646. unsigned int type = get_rsc_type(s->buf);
  647. unsigned int id = get_rsc_id(s->buf);
  648. if (!s->valid)
  649. continue;
  650. set_msg_id(s->buf, msm_rpm_get_next_msg_id());
  651. if (!glink_enabled)
  652. ret = msm_rpm_send_smd_buffer(s->buf,
  653. get_buf_len(s->buf), true);
  654. else
  655. ret = msm_rpm_glink_send_buffer(s->buf,
  656. get_buf_len(s->buf), true);
  657. WARN_ON(ret != get_buf_len(s->buf));
  658. trace_rpm_smd_send_sleep_set(get_msg_id(s->buf), type, id);
  659. s->valid = false;
  660. count++;
  661. /*
  662. * RPM acks need to be handled here if we have sent 24
  663. * messages such that we do not overrun SMD buffer. Since
  664. * we expect only sleep sets at this point (RPM PC would be
  665. * disallowed if we had pending active requests), we need not
  666. * process these sleep set acks.
  667. */
  668. if (count >= MAX_WAIT_ON_ACK) {
  669. int ret = msm_rpm_read_sleep_ack();
  670. if (ret >= 0)
  671. count--;
  672. else
  673. return ret;
  674. }
  675. }
  676. return 0;
  677. }
/* Broadcast one key/value of a sleep-set request to registered
 * notifiers (see msm_rpm_register_notifier()). */
static void msm_rpm_notify_sleep_chain(char *buf,
		struct msm_rpm_kvp_data *kvp)
{
	struct msm_rpm_notifier_data notif;

	notif.rsc_type = get_rsc_type(buf);
	/* NOTE(review): rsc_id is filled from the request *length*, not
	 * get_rsc_id() — looks suspicious; confirm against the chain's
	 * consumers before changing. */
	notif.rsc_id = get_req_len(buf);
	notif.key = kvp->key;
	notif.size = kvp->nbytes;
	notif.value = kvp->value;
	atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
}
/*
 * msm_rpm_add_kvp_data_common() - add or update one key/value pair on a
 * request handle.
 * @handle: request created by msm_rpm_create_request_common().
 * @key:    resource key.
 * @data:   value bytes (copied; @size bytes are read from here).
 * @size:   value length in bytes; padded up to a 4-byte multiple.
 * @noirq:  choose GFP_ATOMIC vs GFP_NOIO for the value allocation.
 *
 * If the key already exists with the same size and identical bytes the
 * call is a no-op. Returns 0 on success, -EINVAL on bad arguments,
 * -ENOMEM when out of kvp slots or memory, or the probe error while the
 * driver is not yet (or failed to be) probed.
 */
static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size, bool noirq)
{
	uint32_t i;
	uint32_t data_size, msg_size;

	if (probe_status)
		return probe_status;

	if (!handle || !data) {
		pr_err("%s(): Invalid handle/data\n", __func__);
		return -EINVAL;
	}

	if (size < 0)
		return -EINVAL;

	/* value is stored 4-byte aligned; +8 covers the kvp header words */
	data_size = ALIGN(size, SZ_4);
	msg_size = data_size + 8;

	/* look for an existing slot holding this key */
	for (i = 0; i < handle->write_idx; i++) {
		if (handle->kvp[i].key != key)
			continue;
		if (handle->kvp[i].nbytes != data_size) {
			/* size changed: drop the old value buffer */
			kfree(handle->kvp[i].value);
			handle->kvp[i].value = NULL;
		} else {
			/* NOTE(review): compares data_size (aligned) bytes
			 * from a caller buffer of only size bytes — may
			 * over-read by up to 3 bytes when size is
			 * unaligned; confirm callers pad their data. */
			if (!memcmp(handle->kvp[i].value, data, data_size))
				return 0;
		}
		break;
	}

	if (i >= handle->num_elements) {
		pr_err("Number of resources exceeds max allocated\n");
		return -ENOMEM;
	}

	/* key not seen before: claim the next free slot */
	if (i == handle->write_idx)
		handle->write_idx++;

	if (!handle->kvp[i].value) {
		handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));

		if (!handle->kvp[i].value)
			return -ENOMEM;
	} else {
		/* We enter the else case, if a key already exists but the
		 * data doesn't match. In which case, we should zero the data
		 * out.
		 */
		memset(handle->kvp[i].value, 0, data_size);
	}

	/* grow the header's data length: full kvp for a new key, or just
	 * the size delta when replacing an existing one */
	if (!handle->kvp[i].valid)
		change_data_len(handle->client_buf, msg_size);
	else
		change_data_len(handle->client_buf,
			(data_size - handle->kvp[i].nbytes));

	handle->kvp[i].nbytes = data_size;
	handle->kvp[i].key = key;
	memcpy(handle->kvp[i].value, data, size);
	handle->kvp[i].valid = true;

	return 0;
}
  744. static struct msm_rpm_request *msm_rpm_create_request_common(
  745. enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
  746. int num_elements, bool noirq)
  747. {
  748. struct msm_rpm_request *cdata;
  749. uint32_t buf_size;
  750. if (probe_status)
  751. return ERR_PTR(probe_status);
  752. cdata = kzalloc(sizeof(struct msm_rpm_request),
  753. GFP_FLAG(noirq));
  754. if (!cdata) {
  755. pr_err("Cannot allocate memory for client data\n");
  756. goto cdata_alloc_fail;
  757. }
  758. if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
  759. buf_size = sizeof(struct rpm_message_header_v0);
  760. else
  761. buf_size = sizeof(struct rpm_message_header_v1);
  762. cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq));
  763. if (!cdata->client_buf)
  764. goto client_buf_alloc_fail;
  765. set_set_type(cdata->client_buf, set);
  766. set_rsc_type(cdata->client_buf, rsc_type);
  767. set_rsc_id(cdata->client_buf, rsc_id);
  768. cdata->num_elements = num_elements;
  769. cdata->write_idx = 0;
  770. cdata->kvp = kcalloc(num_elements, sizeof(struct msm_rpm_kvp_data),
  771. GFP_FLAG(noirq));
  772. if (!cdata->kvp) {
  773. pr_warn("%s(): Cannot allocate memory for key value data\n",
  774. __func__);
  775. goto kvp_alloc_fail;
  776. }
  777. cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
  778. if (!cdata->buf)
  779. goto buf_alloc_fail;
  780. cdata->numbytes = DEFAULT_BUFFER_SIZE;
  781. return cdata;
  782. buf_alloc_fail:
  783. kfree(cdata->kvp);
  784. kvp_alloc_fail:
  785. kfree(cdata->client_buf);
  786. client_buf_alloc_fail:
  787. kfree(cdata);
  788. cdata_alloc_fail:
  789. return NULL;
  790. }
  791. void msm_rpm_free_request(struct msm_rpm_request *handle)
  792. {
  793. int i;
  794. if (!handle)
  795. return;
  796. for (i = 0; i < handle->num_elements; i++)
  797. kfree(handle->kvp[i].value);
  798. kfree(handle->kvp);
  799. kfree(handle->client_buf);
  800. kfree(handle->buf);
  801. kfree(handle);
  802. }
  803. EXPORT_SYMBOL(msm_rpm_free_request);
/*
 * msm_rpm_create_request() - allocate a request handle for process context
 * (may sleep during allocation).
 */
struct msm_rpm_request *msm_rpm_create_request(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements, false);
}
EXPORT_SYMBOL(msm_rpm_create_request);
/*
 * msm_rpm_create_request_noirq() - allocate a request handle from a context
 * that cannot sleep (uses atomic allocations).
 */
struct msm_rpm_request *msm_rpm_create_request_noirq(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements, true);
}
EXPORT_SYMBOL(msm_rpm_create_request_noirq);
/* Add/update a KVP on @handle; process-context (sleepable) allocation. */
int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data);
/* Add/update a KVP on @handle from a context that cannot sleep. */
int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
/*
 * msm_rpm_notify() - SMD channel event callback. Runs in interrupt context.
 * @data:  struct msm_rpm_driver_data registered at channel open
 * @event: SMD_EVENT_* code
 */
static void msm_rpm_notify(void *data, unsigned int event)
{
	struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;

	WARN_ON(!pdata);

	/* Channel not opened yet: nothing to service. */
	if (!(pdata->ch_info))
		return;

	switch (event) {
	case SMD_EVENT_DATA:
		/* Defer ack processing out of hard-irq context. */
		tasklet_schedule(&data_tasklet);
		trace_rpm_smd_interrupt_notify("interrupt notification");
		break;
	case SMD_EVENT_OPEN:
		/* Wake whoever is blocked in the probe path on smd_open. */
		complete(&pdata->smd_open);
		break;
	case SMD_EVENT_CLOSE:
	case SMD_EVENT_STATUS:
	case SMD_EVENT_REOPEN_READY:
		break;
	default:
		pr_info("Unknown SMD event\n");
	}
}
  855. bool msm_rpm_waiting_for_ack(void)
  856. {
  857. bool ret;
  858. unsigned long flags;
  859. spin_lock_irqsave(&msm_rpm_list_lock, flags);
  860. ret = list_empty(&msm_rpm_wait_list);
  861. spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
  862. return !ret;
  863. }
  864. static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
  865. {
  866. struct list_head *ptr;
  867. struct msm_rpm_wait_data *elem = NULL;
  868. unsigned long flags;
  869. spin_lock_irqsave(&msm_rpm_list_lock, flags);
  870. list_for_each(ptr, &msm_rpm_wait_list) {
  871. elem = list_entry(ptr, struct msm_rpm_wait_data, list);
  872. if (elem && (elem->msg_id == msg_id))
  873. break;
  874. elem = NULL;
  875. }
  876. spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
  877. return elem;
  878. }
  879. static uint32_t msm_rpm_get_next_msg_id(void)
  880. {
  881. uint32_t id;
  882. /*
  883. * A message id of 0 is used by the driver to indicate a error
  884. * condition. The RPM driver uses a id of 1 to indicate unsent data
  885. * when the data sent over hasn't been modified. This isn't a error
  886. * scenario and wait for ack returns a success when the message id is 1.
  887. */
  888. do {
  889. id = atomic_inc_return(&msm_rpm_msg_id);
  890. } while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));
  891. return id;
  892. }
  893. static int msm_rpm_add_wait_list(uint32_t msg_id, bool delete_on_ack)
  894. {
  895. unsigned long flags;
  896. struct msm_rpm_wait_data *data =
  897. kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
  898. if (!data)
  899. return -ENOMEM;
  900. init_completion(&data->ack);
  901. data->ack_recd = false;
  902. data->msg_id = msg_id;
  903. data->errno = INIT_ERROR;
  904. data->delete_on_ack = delete_on_ack;
  905. spin_lock_irqsave(&msm_rpm_list_lock, flags);
  906. if (delete_on_ack)
  907. list_add_tail(&data->list, &msm_rpm_wait_list);
  908. else
  909. list_add(&data->list, &msm_rpm_wait_list);
  910. spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
  911. return 0;
  912. }
/* Unlink @elem from the wait list under the list lock, then free it. */
static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
{
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	list_del(&elem->list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
	kfree(elem);
}
  921. static void msm_rpm_process_ack(uint32_t msg_id, int errno)
  922. {
  923. struct list_head *ptr, *next;
  924. struct msm_rpm_wait_data *elem = NULL;
  925. unsigned long flags;
  926. spin_lock_irqsave(&msm_rpm_list_lock, flags);
  927. list_for_each_safe(ptr, next, &msm_rpm_wait_list) {
  928. elem = list_entry(ptr, struct msm_rpm_wait_data, list);
  929. if (elem->msg_id == msg_id) {
  930. elem->errno = errno;
  931. elem->ack_recd = true;
  932. complete(&elem->ack);
  933. if (elem->delete_on_ack) {
  934. list_del(&elem->list);
  935. kfree(elem);
  936. }
  937. break;
  938. }
  939. }
  940. /* Special case where the sleep driver doesn't
  941. * wait for ACKs. This would decrease the latency involved with
  942. * entering RPM assisted power collapse.
  943. */
  944. if (!elem)
  945. trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADBEEF);
  946. spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
  947. }
/* Wire layout of a single key/value element inside an RPM message. */
struct msm_rpm_kvp_packet {
	uint32_t id;
	uint32_t len;
	uint32_t val;
};
/*
 * msm_rpm_read_smd_data() - read one complete packet from the RPM SMD
 * channel into @buf (caller provides MAX_ERR_BUFFER_SIZE bytes).
 *
 * Returns -EAGAIN when no complete packet is available yet, 0 otherwise.
 * NOTE(review): the oversized/underflow error paths WARN but still return
 * 0, so callers cannot distinguish them from success — confirm intended.
 */
static int msm_rpm_read_smd_data(char *buf)
{
	int pkt_sz;
	int bytes_read = 0;

	pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);

	if (!pkt_sz)
		return -EAGAIN;

	/* Never read past the caller's buffer. */
	if (pkt_sz > MAX_ERR_BUFFER_SIZE) {
		pr_err("rpm_smd pkt_sz is greater than max size\n");
		goto error;
	}

	/* Wait until the whole packet has landed in the FIFO. */
	if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
		return -EAGAIN;

	do {
		int len;

		len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
		pkt_sz -= len;
		bytes_read += len;
	} while (pkt_sz > 0);

	if (pkt_sz < 0) {
		pr_err("rpm_smd pkt_sz is less than zero\n");
		goto error;
	}
	return 0;

error:
	WARN_ON(1);

	return 0;
}
/*
 * data_fn_tasklet() - drain all pending ack packets from the SMD channel
 * and wake the matching waiters. Scheduled from msm_rpm_notify().
 */
static void data_fn_tasklet(unsigned long data)
{
	uint32_t msg_id;
	int errno;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};

	spin_lock(&msm_rpm_data.smd_lock_read);
	while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
		if (msm_rpm_read_smd_data(buf))
			break;
		msg_id = msm_rpm_get_msg_id_from_ack(buf);
		errno = msm_rpm_get_error_from_ack(buf);
		trace_rpm_smd_ack_recvd(0, msg_id, errno);
		msm_rpm_process_ack(msg_id, errno);
	}
	spin_unlock(&msm_rpm_data.smd_lock_read);
}
  997. static void msm_rpm_log_request(struct msm_rpm_request *cdata)
  998. {
  999. char buf[DEBUG_PRINT_BUFFER_SIZE];
  1000. size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
  1001. char name[5];
  1002. u32 value;
  1003. uint32_t i;
  1004. int j, prev_valid;
  1005. int valid_count = 0;
  1006. int pos = 0;
  1007. uint32_t res_type, rsc_id;
  1008. name[4] = 0;
  1009. for (i = 0; i < cdata->write_idx; i++)
  1010. if (cdata->kvp[i].valid)
  1011. valid_count++;
  1012. pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
  1013. if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
  1014. pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
  1015. get_msg_id(cdata->client_buf));
  1016. pos += scnprintf(buf + pos, buflen - pos, "s=%s",
  1017. (get_set_type(cdata->client_buf) ==
  1018. MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
  1019. res_type = get_rsc_type(cdata->client_buf);
  1020. rsc_id = get_rsc_id(cdata->client_buf);
  1021. if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
  1022. && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
  1023. /* Both pretty and raw formatting */
  1024. memcpy(name, &res_type, sizeof(uint32_t));
  1025. pos += scnprintf(buf + pos, buflen - pos,
  1026. ", rsc_type=0x%08X (%s), rsc_id=%u; ",
  1027. res_type, name, rsc_id);
  1028. for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
  1029. if (!cdata->kvp[i].valid)
  1030. continue;
  1031. memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
  1032. pos += scnprintf(buf + pos, buflen - pos,
  1033. "[key=0x%08X (%s), value=%s",
  1034. cdata->kvp[i].key, name,
  1035. (cdata->kvp[i].nbytes ? "0x" : "null"));
  1036. for (j = 0; j < cdata->kvp[i].nbytes; j++)
  1037. pos += scnprintf(buf + pos, buflen - pos,
  1038. "%02X ",
  1039. cdata->kvp[i].value[j]);
  1040. if (cdata->kvp[i].nbytes)
  1041. pos += scnprintf(buf + pos, buflen - pos, "(");
  1042. for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
  1043. value = 0;
  1044. memcpy(&value, &cdata->kvp[i].value[j],
  1045. min_t(uint32_t, sizeof(uint32_t),
  1046. cdata->kvp[i].nbytes - j));
  1047. pos += scnprintf(buf + pos, buflen - pos, "%u",
  1048. value);
  1049. if (j + 4 < cdata->kvp[i].nbytes)
  1050. pos += scnprintf(buf + pos,
  1051. buflen - pos, " ");
  1052. }
  1053. if (cdata->kvp[i].nbytes)
  1054. pos += scnprintf(buf + pos, buflen - pos, ")");
  1055. pos += scnprintf(buf + pos, buflen - pos, "]");
  1056. if (prev_valid + 1 < valid_count)
  1057. pos += scnprintf(buf + pos, buflen - pos, ", ");
  1058. prev_valid++;
  1059. }
  1060. } else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
  1061. /* Pretty formatting only */
  1062. memcpy(name, &res_type, sizeof(uint32_t));
  1063. pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
  1064. rsc_id);
  1065. for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
  1066. if (!cdata->kvp[i].valid)
  1067. continue;
  1068. memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
  1069. pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
  1070. name, (cdata->kvp[i].nbytes ? "" : "null"));
  1071. for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
  1072. value = 0;
  1073. memcpy(&value, &cdata->kvp[i].value[j],
  1074. min_t(uint32_t, sizeof(uint32_t),
  1075. cdata->kvp[i].nbytes - j));
  1076. pos += scnprintf(buf + pos, buflen - pos, "%u",
  1077. value);
  1078. if (j + 4 < cdata->kvp[i].nbytes)
  1079. pos += scnprintf(buf + pos,
  1080. buflen - pos, " ");
  1081. }
  1082. if (prev_valid + 1 < valid_count)
  1083. pos += scnprintf(buf + pos, buflen - pos, ", ");
  1084. prev_valid++;
  1085. }
  1086. } else {
  1087. /* Raw formatting only */
  1088. pos += scnprintf(buf + pos, buflen - pos,
  1089. ", rsc_type=0x%08X, rsc_id=%u; ", res_type, rsc_id);
  1090. for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
  1091. if (!cdata->kvp[i].valid)
  1092. continue;
  1093. pos += scnprintf(buf + pos, buflen - pos,
  1094. "[key=0x%08X, value=%s",
  1095. cdata->kvp[i].key,
  1096. (cdata->kvp[i].nbytes ? "0x" : "null"));
  1097. for (j = 0; j < cdata->kvp[i].nbytes; j++) {
  1098. pos += scnprintf(buf + pos, buflen - pos,
  1099. "%02X",
  1100. cdata->kvp[i].value[j]);
  1101. if (j + 1 < cdata->kvp[i].nbytes)
  1102. pos += scnprintf(buf + pos,
  1103. buflen - pos, " ");
  1104. }
  1105. pos += scnprintf(buf + pos, buflen - pos, "]");
  1106. if (prev_valid + 1 < valid_count)
  1107. pos += scnprintf(buf + pos, buflen - pos, ", ");
  1108. prev_valid++;
  1109. }
  1110. }
  1111. pos += scnprintf(buf + pos, buflen - pos, "\n");
  1112. printk(buf);
  1113. }
  1114. static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq)
  1115. {
  1116. unsigned long flags;
  1117. int ret;
  1118. spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
  1119. ret = smd_write_avail(msm_rpm_data.ch_info);
  1120. while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
  1121. if (ret < 0)
  1122. break;
  1123. if (!noirq) {
  1124. spin_unlock_irqrestore(
  1125. &msm_rpm_data.smd_lock_write, flags);
  1126. cpu_relax();
  1127. spin_lock_irqsave(
  1128. &msm_rpm_data.smd_lock_write, flags);
  1129. } else
  1130. udelay(5);
  1131. }
  1132. if (ret < 0) {
  1133. pr_err("SMD not initialized\n");
  1134. spin_unlock_irqrestore(
  1135. &msm_rpm_data.smd_lock_write, flags);
  1136. return ret;
  1137. }
  1138. ret = smd_write(msm_rpm_data.ch_info, buf, size);
  1139. spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
  1140. return ret;
  1141. }
/*
 * msm_rpm_glink_send_buffer() - transmit @buf over glink, retrying up to
 * 50 times while the transport reports flow control.
 *
 * Returns @size on success, 0 when the retries are exhausted.
 */
static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq)
{
	int ret;
	unsigned long flags;
	int timeout = 50;

	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
	do {
		ret = glink_tx(glink_data->glink_handle, buf, buf,
					size, GLINK_TX_SINGLE_THREADED);
		/* Retry only on transient flow-control errors. */
		if (ret == -EBUSY || ret == -ENOSPC) {
			if (!noirq) {
				spin_unlock_irqrestore(
					&msm_rpm_data.smd_lock_write, flags);
				cpu_relax();
				spin_lock_irqsave(
					&msm_rpm_data.smd_lock_write, flags);
			} else {
				udelay(5);
			}
			timeout--;
		} else {
			/*
			 * NOTE(review): any other glink_tx() error is also
			 * mapped to 0 here and then reported as a full
			 * @size success below — confirm this is intended.
			 */
			ret = 0;
		}
	} while (ret && timeout);
	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);

	if (!timeout)
		return 0;
	else
		return size;
}
  1172. static int msm_rpm_send_data(struct msm_rpm_request *cdata,
  1173. int msg_type, bool noirq, bool noack)
  1174. {
  1175. uint8_t *tmpbuff;
  1176. int ret;
  1177. uint32_t i;
  1178. uint32_t msg_size;
  1179. int msg_hdr_sz, req_hdr_sz;
  1180. uint32_t data_len = get_data_len(cdata->client_buf);
  1181. uint32_t set = get_set_type(cdata->client_buf);
  1182. uint32_t msg_id;
  1183. if (probe_status)
  1184. return probe_status;
  1185. if (!data_len)
  1186. return 1;
  1187. msg_hdr_sz = rpm_msg_fmt_ver ? sizeof(struct rpm_message_header_v1) :
  1188. sizeof(struct rpm_message_header_v0);
  1189. req_hdr_sz = RPM_HDR_SIZE;
  1190. set_msg_type(cdata->client_buf, msg_type);
  1191. set_req_len(cdata->client_buf, data_len + msg_hdr_sz - req_hdr_sz);
  1192. msg_size = get_req_len(cdata->client_buf) + req_hdr_sz;
  1193. /* populate data_len */
  1194. if (msg_size > cdata->numbytes) {
  1195. kfree(cdata->buf);
  1196. cdata->numbytes = msg_size;
  1197. cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
  1198. }
  1199. if (!cdata->buf) {
  1200. pr_err("Failed malloc\n");
  1201. return 0;
  1202. }
  1203. tmpbuff = cdata->buf;
  1204. tmpbuff += msg_hdr_sz;
  1205. for (i = 0; (i < cdata->write_idx); i++) {
  1206. /* Sanity check */
  1207. WARN_ON((tmpbuff - cdata->buf) > cdata->numbytes);
  1208. if (!cdata->kvp[i].valid)
  1209. continue;
  1210. memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
  1211. tmpbuff += sizeof(uint32_t);
  1212. memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
  1213. tmpbuff += sizeof(uint32_t);
  1214. memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
  1215. tmpbuff += cdata->kvp[i].nbytes;
  1216. if (set == MSM_RPM_CTX_SLEEP_SET)
  1217. msm_rpm_notify_sleep_chain(cdata->client_buf,
  1218. &cdata->kvp[i]);
  1219. }
  1220. memcpy(cdata->buf, cdata->client_buf, msg_hdr_sz);
  1221. if ((set == MSM_RPM_CTX_SLEEP_SET) &&
  1222. !msm_rpm_smd_buffer_request(cdata, msg_size,
  1223. GFP_FLAG(noirq)))
  1224. return 1;
  1225. msg_id = msm_rpm_get_next_msg_id();
  1226. /* Set the version bit for new protocol */
  1227. set_msg_ver(cdata->buf, rpm_msg_fmt_ver);
  1228. set_msg_id(cdata->buf, msg_id);
  1229. set_msg_id(cdata->client_buf, msg_id);
  1230. if (msm_rpm_debug_mask
  1231. & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
  1232. msm_rpm_log_request(cdata);
  1233. if (standalone) {
  1234. for (i = 0; (i < cdata->write_idx); i++)
  1235. cdata->kvp[i].valid = false;
  1236. set_data_len(cdata->client_buf, 0);
  1237. ret = msg_id;
  1238. return ret;
  1239. }
  1240. msm_rpm_add_wait_list(msg_id, noack);
  1241. ret = msm_rpm_send_buffer(&cdata->buf[0], msg_size, noirq);
  1242. if (ret == msg_size) {
  1243. for (i = 0; (i < cdata->write_idx); i++)
  1244. cdata->kvp[i].valid = false;
  1245. set_data_len(cdata->client_buf, 0);
  1246. ret = msg_id;
  1247. trace_rpm_smd_send_active_set(msg_id,
  1248. get_rsc_type(cdata->client_buf),
  1249. get_rsc_id(cdata->client_buf));
  1250. } else if (ret < msg_size) {
  1251. struct msm_rpm_wait_data *rc;
  1252. ret = 0;
  1253. pr_err("Failed to write data msg_size:%d ret:%d msg_id:%d\n",
  1254. msg_size, ret, msg_id);
  1255. rc = msm_rpm_get_entry_from_msg_id(msg_id);
  1256. if (rc)
  1257. msm_rpm_free_list_entry(rc);
  1258. }
  1259. return ret;
  1260. }
/*
 * _msm_rpm_send_request() - serialized process-context send.
 * @noack: register the waiter as delete-on-ack (caller will not wait).
 *
 * The function-local mutex serializes all process-context sends.
 */
static int _msm_rpm_send_request(struct msm_rpm_request *handle, bool noack)
{
	int ret;
	static DEFINE_MUTEX(send_mtx);

	mutex_lock(&send_mtx);
	ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false, noack);
	mutex_unlock(&send_mtx);

	return ret;
}
/*
 * msm_rpm_send_request() - send @handle and return the message id to pass
 * to msm_rpm_wait_for_ack() (0 on error, 1 if nothing needed sending).
 */
int msm_rpm_send_request(struct msm_rpm_request *handle)
{
	return _msm_rpm_send_request(handle, false);
}
EXPORT_SYMBOL(msm_rpm_send_request);
/*
 * msm_rpm_send_request_noirq() - send from a context that cannot sleep.
 * NOTE(review): unlike msm_rpm_send_request(), this path does not take
 * send_mtx — presumably callers are serialized externally; confirm.
 */
int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
{
	return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true, false);
}
EXPORT_SYMBOL(msm_rpm_send_request_noirq);
/*
 * msm_rpm_send_request_noack() - fire-and-forget send.
 * Returns NULL on success or an ERR_PTR-encoded error.
 */
void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
{
	int ret;

	ret = _msm_rpm_send_request(handle, true);

	return ret < 0 ? ERR_PTR(ret) : NULL;
}
EXPORT_SYMBOL(msm_rpm_send_request_noack);
  1287. int msm_rpm_wait_for_ack(uint32_t msg_id)
  1288. {
  1289. struct msm_rpm_wait_data *elem;
  1290. int rc = 0;
  1291. if (!msg_id) {
  1292. pr_err("Invalid msg id\n");
  1293. return -ENOMEM;
  1294. }
  1295. if (msg_id == 1)
  1296. return rc;
  1297. if (standalone)
  1298. return rc;
  1299. elem = msm_rpm_get_entry_from_msg_id(msg_id);
  1300. if (!elem)
  1301. return rc;
  1302. wait_for_completion(&elem->ack);
  1303. trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADFEED);
  1304. rc = elem->errno;
  1305. msm_rpm_free_list_entry(elem);
  1306. return rc;
  1307. }
  1308. EXPORT_SYMBOL(msm_rpm_wait_for_ack);
/*
 * msm_rpm_smd_read_data_noirq() - busy-poll the SMD channel (interrupts
 * masked) until the ack for @msg_id is seen. Acks for other ids read on
 * the way are dispatched to their waiters via msm_rpm_process_ack().
 */
static void msm_rpm_smd_read_data_noirq(uint32_t msg_id)
{
	uint32_t id = 0;

	while (id != msg_id) {
		if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
			int errno;
			char buf[MAX_ERR_BUFFER_SIZE] = {};

			msm_rpm_read_smd_data(buf);
			id = msm_rpm_get_msg_id_from_ack(buf);
			errno = msm_rpm_get_error_from_ack(buf);
			trace_rpm_smd_ack_recvd(1, msg_id, errno);
			msm_rpm_process_ack(id, errno);
		}
	}
}
/*
 * msm_rpm_glink_read_data_noirq() - poll glink until @elem's ack arrives.
 * Completion is detected by msm_rpm_trans_notify_rx() overwriting
 * elem->errno (it starts as the non-zero INIT_ERROR).
 */
static void msm_rpm_glink_read_data_noirq(struct msm_rpm_wait_data *elem)
{
	int ret;

	/* Use rx_poll method to read the message from RPM */
	while (elem->errno) {
		ret = glink_rpm_rx_poll(glink_data->glink_handle);
		if (ret >= 0) {
			/*
			 * We might have received the notification.
			 * Now we have to check whether the notification
			 * received is what we are interested in?
			 * Wait for a few usec to get the notification
			 * before re-trying the poll again.
			 */
			udelay(50);
		} else {
			pr_err("rx poll return error = %d\n", ret);
		}
	}
}
/*
 * msm_rpm_wait_for_ack_noirq() - poll for the ack of @msg_id with the
 * read lock held (for contexts that cannot sleep).
 *
 * Returns the ack's errno, 0 when there is nothing to wait for, or
 * -ENOMEM for msg_id 0.
 */
int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
{
	struct msm_rpm_wait_data *elem;
	unsigned long flags;
	int rc = 0;

	if (!msg_id) {
		pr_err("Invalid msg id\n");
		return -ENOMEM;
	}

	/* msg_id 1 means nothing was actually sent. */
	if (msg_id == 1)
		return 0;

	if (standalone)
		return 0;

	spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);

	elem = msm_rpm_get_entry_from_msg_id(msg_id);

	if (!elem)
		/* Should this be a bug
		 * Is it ok for another thread to read the msg?
		 */
		goto wait_ack_cleanup;

	/* The ack may already have been delivered by the tasklet. */
	if (elem->errno != INIT_ERROR) {
		rc = elem->errno;
		msm_rpm_free_list_entry(elem);
		goto wait_ack_cleanup;
	}

	if (!glink_enabled)
		msm_rpm_smd_read_data_noirq(msg_id);
	else
		msm_rpm_glink_read_data_noirq(elem);

	rc = elem->errno;

	msm_rpm_free_list_entry(elem);

wait_ack_cleanup:
	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);

	/* Kick the tasklet for any other acks that arrived meanwhile. */
	if (!glink_enabled)
		if (smd_is_pkt_avail(msm_rpm_data.ch_info))
			tasklet_schedule(&data_tasklet);
	return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
  1383. void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
  1384. uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
  1385. {
  1386. int i, rc;
  1387. struct msm_rpm_request *req =
  1388. msm_rpm_create_request_common(set, rsc_type, rsc_id, nelems,
  1389. false);
  1390. if (IS_ERR(req))
  1391. return req;
  1392. if (!req)
  1393. return ERR_PTR(ENOMEM);
  1394. for (i = 0; i < nelems; i++) {
  1395. rc = msm_rpm_add_kvp_data(req, kvp[i].key,
  1396. kvp[i].data, kvp[i].length);
  1397. if (rc)
  1398. goto bail;
  1399. }
  1400. rc = PTR_ERR(msm_rpm_send_request_noack(req));
  1401. bail:
  1402. msm_rpm_free_request(req);
  1403. return rc < 0 ? ERR_PTR(rc) : NULL;
  1404. }
  1405. EXPORT_SYMBOL(msm_rpm_send_message_noack);
  1406. int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
  1407. uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
  1408. {
  1409. int i, rc;
  1410. struct msm_rpm_request *req =
  1411. msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
  1412. if (IS_ERR(req))
  1413. return PTR_ERR(req);
  1414. if (!req)
  1415. return -ENOMEM;
  1416. for (i = 0; i < nelems; i++) {
  1417. rc = msm_rpm_add_kvp_data(req, kvp[i].key,
  1418. kvp[i].data, kvp[i].length);
  1419. if (rc)
  1420. goto bail;
  1421. }
  1422. rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
  1423. bail:
  1424. msm_rpm_free_request(req);
  1425. return rc;
  1426. }
  1427. EXPORT_SYMBOL(msm_rpm_send_message);
/*
 * msm_rpm_send_message_noirq() - same as msm_rpm_send_message(), but safe
 * for contexts that cannot sleep (atomic allocation, polled ack).
 *
 * Returns 0 on success or a negative errno.
 */
int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
{
	int i, rc;
	struct msm_rpm_request *req =
		msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!req)
		return -ENOMEM;

	for (i = 0; i < nelems; i++) {
		rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
					kvp[i].data, kvp[i].length);
		if (rc)
			goto bail;
	}

	rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
bail:
	msm_rpm_free_request(req);
	return rc;
}
EXPORT_SYMBOL(msm_rpm_send_message_noirq);
  1450. /**
  1451. * During power collapse, the rpm driver disables the SMD interrupts to make
 * sure that the interrupt does not wake us from sleep.
  1453. */
/*
 * msm_rpm_enter_sleep() - prepare the RPM transport for power collapse.
 * @print:   passed to msm_rpm_flush_requests() (controls request logging)
 * @cpumask: cpus allowed to receive the wakeup interrupt
 *
 * Masks rx interrupts, then flushes the buffered sleep-set requests; on
 * flush failure the interrupt mask is restored. Returns 0 on success.
 */
int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
{
	int ret = 0;

	if (standalone)
		return 0;

	if (!glink_enabled)
		ret = smd_mask_receive_interrupt(msm_rpm_data.ch_info,
				true, cpumask);
	else
		ret = glink_rpm_mask_rx_interrupt(glink_data->glink_handle,
				true, (void *)cpumask);
	if (!ret) {
		/* Push the buffered sleep-set requests to the RPM. */
		ret = msm_rpm_flush_requests(print);
		if (ret) {
			/* Flush failed: re-enable interrupt delivery. */
			if (!glink_enabled)
				smd_mask_receive_interrupt(
					msm_rpm_data.ch_info, false, NULL);
			else
				glink_rpm_mask_rx_interrupt(
					glink_data->glink_handle, false, NULL);
		}
	}
	return ret;
}
EXPORT_SYMBOL(msm_rpm_enter_sleep);
  1479. /**
  1480. * When the system resumes from power collapse, the SMD interrupt disabled by
 * the enter function has to be re-enabled to continue processing SMD messages.
  1482. */
/*
 * msm_rpm_exit_sleep() - undo msm_rpm_enter_sleep() after resume:
 * drain pending sleep-set acks, then unmask the rx interrupt.
 */
void msm_rpm_exit_sleep(void)
{
	int ret;

	if (standalone)
		return;

	/* Consume any sleep-set acks that arrived during power collapse. */
	do {
		ret = msm_rpm_read_sleep_ack();
	} while (ret > 0);

	if (!glink_enabled)
		smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
	else
		glink_rpm_mask_rx_interrupt(glink_data->glink_handle,
				false, NULL);
}
EXPORT_SYMBOL(msm_rpm_exit_sleep);
  1498. /*
  1499. * Whenever there is a data from RPM, notify_rx will be called.
 * This function is invoked in either interrupt or polling context.
  1501. */
/*
 * msm_rpm_trans_notify_rx() - glink rx callback: decode the ack in @ptr
 * and wake the waiter for its message id.
 */
static void msm_rpm_trans_notify_rx(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr, size_t size)
{
	uint32_t msg_id;
	int errno;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};
	struct msm_rpm_wait_data *elem;
	static DEFINE_SPINLOCK(rx_notify_lock);
	unsigned long flags;

	if (!size)
		return;

	WARN_ON(size > MAX_ERR_BUFFER_SIZE);

	/* Serialize concurrent rx notifications (irq and poll paths). */
	spin_lock_irqsave(&rx_notify_lock, flags);
	memcpy(buf, ptr, size);
	msg_id = msm_rpm_get_msg_id_from_ack(buf);
	errno = msm_rpm_get_error_from_ack(buf);
	elem = msm_rpm_get_entry_from_msg_id(msg_id);

	/*
	 * It is applicable for sleep set requests
	 * Sleep set requests are not added to the
	 * wait queue list. Without this check we
	 * run into a NULL pointer dereference issue.
	 */
	if (!elem) {
		spin_unlock_irqrestore(&rx_notify_lock, flags);
		glink_rx_done(handle, ptr, 0);
		return;
	}

	msm_rpm_process_ack(msg_id, errno);
	spin_unlock_irqrestore(&rx_notify_lock, flags);

	/* Return the rx buffer to glink in both paths. */
	glink_rx_done(handle, ptr, 0);
}
/*
 * msm_rpm_trans_notify_state() - glink channel state callback; records
 * the handle and opens the driver for clients once connected.
 */
static void msm_rpm_trans_notify_state(void *handle, const void *priv,
		unsigned int event)
{
	switch (event) {
	case GLINK_CONNECTED:
		glink_data->glink_handle = handle;

		if (IS_ERR_OR_NULL(glink_data->glink_handle)) {
			pr_err("glink_handle %d\n",
					(int)PTR_ERR(glink_data->glink_handle));
			WARN_ON(1);
		}

		/*
		 * Do not allow clients to send data to RPM until glink
		 * is fully open.
		 */
		probe_status = 0;
		pr_info("glink config params: transport=%s, edge=%s, name=%s\n",
			glink_data->xprt,
			glink_data->edge,
			glink_data->name);
		break;
	default:
		pr_err("Unrecognized event %d\n", event);
		break;
	};
}
/* glink tx-done callback: intentionally empty, tx buffers need no action. */
static void msm_rpm_trans_notify_tx_done(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr)
{
}
/*
 * msm_rpm_glink_open_work() - worker that opens the glink channel once
 * the link-state callback reports the link is up.
 */
static void msm_rpm_glink_open_work(struct work_struct *work)
{
	pr_debug("Opening glink channel\n");
	glink_data->glink_handle = glink_open(glink_data->open_cfg);

	if (IS_ERR_OR_NULL(glink_data->glink_handle)) {
		pr_err("Error: glink_open failed %d\n",
				(int)PTR_ERR(glink_data->glink_handle));
		WARN_ON(1);
	}
}
/*
 * msm_rpm_glink_notifier_cb() - glink link-state notifier. On the first
 * LINK_STATE_UP it builds the open config and schedules the open worker;
 * subsequent UP events are ignored.
 */
static void msm_rpm_glink_notifier_cb(struct glink_link_state_cb_info *cb_info,
		void *priv)
{
	struct glink_open_config *open_config;
	static bool first = true;

	if (!cb_info) {
		pr_err("Missing callback data\n");
		return;
	}

	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		/* Only react to the first link-up notification. */
		if (first)
			first = false;
		else
			break;
		open_config = kzalloc(sizeof(*open_config), GFP_KERNEL);
		if (!open_config) {
			pr_err("Could not allocate memory\n");
			break;
		}

		glink_data->open_cfg = open_config;
		pr_debug("glink link state up cb receieved\n");
		INIT_WORK(&glink_data->work, msm_rpm_glink_open_work);

		/* Wire up the channel callbacks before opening. */
		open_config->priv = glink_data;
		open_config->name = glink_data->name;
		open_config->edge = glink_data->edge;
		open_config->notify_rx = msm_rpm_trans_notify_rx;
		open_config->notify_tx_done = msm_rpm_trans_notify_tx_done;
		open_config->notify_state = msm_rpm_trans_notify_state;
		schedule_work(&glink_data->work);
		break;
	default:
		pr_err("Unrecognised state = %d\n", cb_info->link_state);
		break;
	};
}
/*
 * msm_rpm_glink_dt_parse() - read the glink edge and channel name from
 * the device tree; sets the global glink_enabled flag on a compatible
 * match. Returns 0 or a negative errno.
 */
static int msm_rpm_glink_dt_parse(struct platform_device *pdev,
		struct glink_apps_rpm_data *glink_data)
{
	char *key = NULL;
	int ret;

	if (of_device_is_compatible(pdev->dev.of_node, "qcom,rpm-glink")) {
		glink_enabled = true;
	} else {
		pr_warn("qcom,rpm-glink compatible not matches\n");
		ret = -EINVAL;
		return ret;
	}

	key = "qcom,glink-edge";
	ret = of_property_read_string(pdev->dev.of_node, key,
			&glink_data->edge);
	if (ret) {
		pr_err("Failed to read node: %s, key=%s\n",
			pdev->dev.of_node->full_name, key);
		return ret;
	}

	key = "rpm-channel-name";
	ret = of_property_read_string(pdev->dev.of_node, key,
			&glink_data->name);
	if (ret)
		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
			pdev->dev.of_node->full_name, key);

	return ret;
}
  1638. static int msm_rpm_glink_link_setup(struct glink_apps_rpm_data *glink_data,
  1639. struct platform_device *pdev)
  1640. {
  1641. struct glink_link_info *link_info;
  1642. void *link_state_cb_handle;
  1643. struct device *dev = &pdev->dev;
  1644. int ret = 0;
  1645. link_info = devm_kzalloc(dev, sizeof(struct glink_link_info),
  1646. GFP_KERNEL);
  1647. if (!link_info) {
  1648. ret = -ENOMEM;
  1649. return ret;
  1650. }
  1651. glink_data->link_info = link_info;
  1652. /*
  1653. * Setup link info parameters
  1654. */
  1655. link_info->edge = glink_data->edge;
  1656. link_info->glink_link_state_notif_cb =
  1657. msm_rpm_glink_notifier_cb;
  1658. link_state_cb_handle = glink_register_link_state_cb(link_info, NULL);
  1659. if (IS_ERR_OR_NULL(link_state_cb_handle)) {
  1660. pr_err("Could not register cb\n");
  1661. ret = PTR_ERR(link_state_cb_handle);
  1662. return ret;
  1663. }
  1664. spin_lock_init(&msm_rpm_data.smd_lock_read);
  1665. spin_lock_init(&msm_rpm_data.smd_lock_write);
  1666. return ret;
  1667. }
  1668. static int msm_rpm_dev_glink_probe(struct platform_device *pdev)
  1669. {
  1670. int ret = -ENOMEM;
  1671. struct device *dev = &pdev->dev;
  1672. glink_data = devm_kzalloc(dev, sizeof(*glink_data), GFP_KERNEL);
  1673. if (!glink_data)
  1674. return ret;
  1675. ret = msm_rpm_glink_dt_parse(pdev, glink_data);
  1676. if (ret < 0) {
  1677. devm_kfree(dev, glink_data);
  1678. return ret;
  1679. }
  1680. ret = msm_rpm_glink_link_setup(glink_data, pdev);
  1681. if (ret < 0) {
  1682. /*
  1683. * If the glink setup fails there is no
  1684. * fall back mechanism to SMD.
  1685. */
  1686. pr_err("GLINK setup fail ret = %d\n", ret);
  1687. WARN_ON(1);
  1688. }
  1689. return ret;
  1690. }
  1691. static int msm_rpm_dev_probe(struct platform_device *pdev)
  1692. {
  1693. char *key = NULL;
  1694. int ret = 0;
  1695. void __iomem *reg_base;
  1696. uint32_t version = V0_PROTOCOL_VERSION; /* set to default v0 format */
  1697. /*
  1698. * Check for standalone support
  1699. */
  1700. key = "rpm-standalone";
  1701. standalone = of_property_read_bool(pdev->dev.of_node, key);
  1702. if (standalone) {
  1703. probe_status = ret;
  1704. goto skip_init;
  1705. }
  1706. reg_base = of_iomap(pdev->dev.of_node, 0);
  1707. if (reg_base) {
  1708. version = readq_relaxed(reg_base);
  1709. iounmap(reg_base);
  1710. }
  1711. if (version == V1_PROTOCOL_VERSION)
  1712. rpm_msg_fmt_ver = RPM_MSG_V1_FMT;
  1713. pr_debug("RPM-SMD running version %d/n", rpm_msg_fmt_ver);
  1714. ret = msm_rpm_dev_glink_probe(pdev);
  1715. if (!ret) {
  1716. pr_info("APSS-RPM communication over GLINK\n");
  1717. msm_rpm_send_buffer = msm_rpm_glink_send_buffer;
  1718. of_platform_populate(pdev->dev.of_node, NULL, NULL,
  1719. &pdev->dev);
  1720. return ret;
  1721. }
  1722. msm_rpm_send_buffer = msm_rpm_send_smd_buffer;
  1723. key = "rpm-channel-name";
  1724. ret = of_property_read_string(pdev->dev.of_node, key,
  1725. &msm_rpm_data.ch_name);
  1726. if (ret) {
  1727. pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
  1728. pdev->dev.of_node->full_name, key);
  1729. goto fail;
  1730. }
  1731. key = "rpm-channel-type";
  1732. ret = of_property_read_u32(pdev->dev.of_node, key,
  1733. &msm_rpm_data.ch_type);
  1734. if (ret) {
  1735. pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
  1736. pdev->dev.of_node->full_name, key);
  1737. goto fail;
  1738. }
  1739. ret = smd_named_open_on_edge(msm_rpm_data.ch_name,
  1740. msm_rpm_data.ch_type,
  1741. &msm_rpm_data.ch_info,
  1742. &msm_rpm_data,
  1743. msm_rpm_notify);
  1744. if (ret) {
  1745. if (ret != -EPROBE_DEFER) {
  1746. pr_err("%s: Cannot open RPM channel %s %d\n",
  1747. __func__, msm_rpm_data.ch_name,
  1748. msm_rpm_data.ch_type);
  1749. }
  1750. goto fail;
  1751. }
  1752. spin_lock_init(&msm_rpm_data.smd_lock_write);
  1753. spin_lock_init(&msm_rpm_data.smd_lock_read);
  1754. tasklet_init(&data_tasklet, data_fn_tasklet, 0);
  1755. wait_for_completion(&msm_rpm_data.smd_open);
  1756. smd_disable_read_intr(msm_rpm_data.ch_info);
  1757. probe_status = ret;
  1758. skip_init:
  1759. of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
  1760. if (standalone)
  1761. pr_info("RPM running in standalone mode\n");
  1762. fail:
  1763. return probe_status;
  1764. }
/*
 * Device tree compatibles handled by this driver: the legacy SMD
 * transport ("qcom,rpm-smd") and the GLINK transport ("qcom,rpm-glink").
 * The probe routine distinguishes the two at runtime.
 */
static const struct of_device_id msm_rpm_match_table[] = {
	{.compatible = "qcom,rpm-smd"},
	{.compatible = "qcom,rpm-glink"},
	{},
};
/* Platform driver bound via msm_rpm_match_table; probed early at boot. */
static struct platform_driver msm_rpm_device_driver = {
	.probe = msm_rpm_dev_probe,
	.driver = {
		.name = "rpm-smd",
		.owner = THIS_MODULE,
		.of_match_table = msm_rpm_match_table,
	},
};
  1778. int __init msm_rpm_driver_init(void)
  1779. {
  1780. static bool registered;
  1781. if (registered)
  1782. return 0;
  1783. registered = true;
  1784. return platform_driver_register(&msm_rpm_device_driver);
  1785. }
  1786. EXPORT_SYMBOL(msm_rpm_driver_init);
  1787. arch_initcall(msm_rpm_driver_init);