glink_smem_native_xprt.c

/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ipc_logging.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/cpumask.h>
#include <soc/qcom/smem.h>
#include <soc/qcom/tracer_pkt.h>
#include "glink_core_if.h"
#include "glink_private.h"
#include "glink_xprt_if.h"

#define XPRT_NAME "smem"
#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
#define SMEM_CH_DESC_SIZE 32
#define RPM_TOC_ID 0x67727430
#define RPM_TX_FIFO_ID 0x61703272
#define RPM_RX_FIFO_ID 0x72326170
#define RPM_TOC_SIZE 256
#define RPM_MAX_TOC_ENTRIES 20
#define RPM_FIFO_ADDR_ALIGN_BYTES 3
#define TRACER_PKT_FEATURE BIT(2)
#define DEFERRED_CMDS_THRESHOLD 25
#define NUM_LOG_PAGES 4

/**
 * enum command_types - definition of the types of commands sent/received
 * @VERSION_CMD: Version and feature set supported
 * @VERSION_ACK_CMD: Response for @VERSION_CMD
 * @OPEN_CMD: Open a channel
 * @CLOSE_CMD: Close a channel
 * @OPEN_ACK_CMD: Response to @OPEN_CMD
 * @RX_INTENT_CMD: RX intent for a channel was queued
 * @RX_DONE_CMD: Use of RX intent for a channel is complete
 * @RX_INTENT_REQ_CMD: Request to have RX intent queued
 * @RX_INTENT_REQ_ACK_CMD: Response for @RX_INTENT_REQ_CMD
 * @TX_DATA_CMD: Start of a data transfer
 * @ZERO_COPY_TX_DATA_CMD: Start of a data transfer with zero copy
 * @CLOSE_ACK_CMD: Response for @CLOSE_CMD
 * @TX_DATA_CONT_CMD: Continuation or end of a data transfer
 * @READ_NOTIF_CMD: Request for a notification when this cmd is read
 * @RX_DONE_W_REUSE_CMD: Same as @RX_DONE_CMD but also reuse the used intent
 * @SIGNALS_CMD: Sideband signals
 * @TRACER_PKT_CMD: Start of a Tracer Packet Command
 * @TRACER_PKT_CONT_CMD: Continuation or end of a Tracer Packet Command
 */
enum command_types {
        VERSION_CMD,
        VERSION_ACK_CMD,
        OPEN_CMD,
        CLOSE_CMD,
        OPEN_ACK_CMD,
        RX_INTENT_CMD,
        RX_DONE_CMD,
        RX_INTENT_REQ_CMD,
        RX_INTENT_REQ_ACK_CMD,
        TX_DATA_CMD,
        ZERO_COPY_TX_DATA_CMD,
        CLOSE_ACK_CMD,
        TX_DATA_CONT_CMD,
        READ_NOTIF_CMD,
        RX_DONE_W_REUSE_CMD,
        SIGNALS_CMD,
        TRACER_PKT_CMD,
        TRACER_PKT_CONT_CMD,
};
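
/*
 * Wire format note: every command begins with the same 8-byte header,
 * mirrored by the local "struct command" definitions used throughout
 * this file:
 *
 *        struct command {
 *                uint16_t id;      a command_types value
 *                uint16_t param1;  typically a channel id
 *                uint32_t param2;  command specific meaning
 *        };
 *
 * Variable-length payloads, such as the channel name for OPEN_CMD,
 * follow the header and are padded out to FIFO_ALIGNMENT.
 */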

/**
 * struct channel_desc - description of a channel fifo with a remote entity
 * @read_index: The read index for the fifo where data should be
 *              consumed from.
 * @write_index: The write index for the fifo where data should be
 *              produced to.
 *
 * This structure resides in SMEM and contains the control information for the
 * fifo data pipes of the channel. There is one physical channel between us
 * and a remote entity.
 */
struct channel_desc {
        uint32_t read_index;
        uint32_t write_index;
};
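
/*
 * Index convention (see fifo_read_avail() and fifo_write_avail()): the
 * fifo is empty when read_index == write_index, and writers never use
 * the final FIFO_FULL_RESERVE bytes, so a completely full fifo cannot
 * be confused with an empty one.
 */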

/**
 * struct mailbox_config_info - description of a mailbox transport channel
 * @tx_read_index: Offset into the tx fifo where data should be read from.
 * @tx_write_index: Offset into the tx fifo where new data will be placed.
 * @tx_size: Size of the transmit fifo in bytes.
 * @rx_read_index: Offset into the rx fifo where data should be read from.
 * @rx_write_index: Offset into the rx fifo where new data will be placed.
 * @rx_size: Size of the receive fifo in bytes.
 * @fifo: The fifos for the channel.
 */
struct mailbox_config_info {
        uint32_t tx_read_index;
        uint32_t tx_write_index;
        uint32_t tx_size;
        uint32_t rx_read_index;
        uint32_t rx_write_index;
        uint32_t rx_size;
        char fifo[]; /* tx fifo, then rx fifo */
};

/**
 * struct edge_info - local information for managing a single complete edge
 * @xprt_if: The transport interface registered with the
 *           glink core associated with this edge.
 * @xprt_cfg: The transport configuration for the glink core
 *            associated with this edge.
 * @intentless: True if this edge runs in intentless mode.
 * @irq_disabled: Flag indicating whether the interrupt is enabled
 *                or disabled.
 * @remote_proc_id: The SMEM processor id for the remote side.
 * @rx_reset_reg: Reference to the register to reset the rx irq
 *                line, if applicable.
 * @out_irq_reg: Reference to the register to send an irq to the
 *               remote side.
 * @out_irq_mask: Mask written to @out_irq_reg to trigger the
 *                correct irq.
 * @irq_line: The incoming interrupt line.
 * @tx_irq_count: Number of interrupts triggered.
 * @rx_irq_count: Number of interrupts received.
 * @tx_ch_desc: Reference to the channel description structure
 *              for tx in SMEM for this edge.
 * @rx_ch_desc: Reference to the channel description structure
 *              for rx in SMEM for this edge.
 * @tx_fifo: Reference to the transmit fifo in SMEM.
 * @rx_fifo: Reference to the receive fifo in SMEM.
 * @tx_fifo_size: Total size of @tx_fifo.
 * @rx_fifo_size: Total size of @rx_fifo.
 * @read_from_fifo: Memcpy for this edge.
 * @write_to_fifo: Memcpy for this edge.
 * @write_lock: Lock to serialize access to @tx_fifo.
 * @tx_blocked_queue: Queue of entities waiting for the remote side to
 *                    signal @tx_fifo has flushed and is now empty.
 * @tx_resume_needed: A tx resume signal needs to be sent to the glink
 *                    core once the remote side indicates @tx_fifo has
 *                    flushed.
 * @tx_blocked_signal_sent: Flag to indicate the flush signal has already
 *                    been sent, and a response is pending from the
 *                    remote side. Protected by @write_lock.
 * @debug_mask: Mask to set debugging level.
 * @kwork: Work to be executed when an irq is received.
 * @kworker: Handle to the entity processing deferred
 *           commands.
 * @task: Handle to the task context used to run @kworker.
 * @use_ref: Active uses of this transport use this to grab
 *           a reference. Used for ssr synchronization.
 * @in_ssr: Signals if this transport is in ssr.
 * @rx_lock: Used to serialize concurrent instances of rx
 *           processing.
 * @deferred_cmds: List of deferred commands that need to be
 *                 processed in process context.
 * @deferred_cmds_cnt: Number of deferred commands in queue.
 * @rt_vote_lock: Serialize access to RT rx votes.
 * @rt_votes: Vote count for RT rx thread priority.
 * @num_pw_states: Size of @ramp_time_us.
 * @readback: Read-back of @tx_ch_desc->write_index, used by
 *            send_irq() to ensure the descriptor update is visible
 *            before the outgoing irq.
 * @ramp_time_us: Array of ramp times in microseconds where array
 *                index position represents a power state.
 * @mailbox: Mailbox transport channel description reference.
 * @log_ctx: Pointer to log context.
 */
struct edge_info {
        struct glink_transport_if xprt_if;
        struct glink_core_transport_cfg xprt_cfg;
        bool intentless;
        bool irq_disabled;
        uint32_t remote_proc_id;
        void __iomem *rx_reset_reg;
        void __iomem *out_irq_reg;
        uint32_t out_irq_mask;
        uint32_t irq_line;
        uint32_t tx_irq_count;
        uint32_t rx_irq_count;
        struct channel_desc *tx_ch_desc;
        struct channel_desc *rx_ch_desc;
        void __iomem *tx_fifo;
        void __iomem *rx_fifo;
        uint32_t tx_fifo_size;
        uint32_t rx_fifo_size;
        void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
        void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
        spinlock_t write_lock;
        wait_queue_head_t tx_blocked_queue;
        bool tx_resume_needed;
        bool tx_blocked_signal_sent;
        unsigned int debug_mask;
        struct kthread_work kwork;
        struct kthread_worker kworker;
        struct task_struct *task;
        struct srcu_struct use_ref;
        bool in_ssr;
        spinlock_t rx_lock;
        struct list_head deferred_cmds;
        uint32_t deferred_cmds_cnt;
        spinlock_t rt_vote_lock;
        uint32_t rt_votes;
        uint32_t num_pw_states;
        uint32_t readback;
        unsigned long *ramp_time_us;
        struct mailbox_config_info *mailbox;
        void *log_ctx;
};

/**
 * struct deferred_cmd - description of a command to be processed later
 * @list_node: Used to put this command on a list in the edge.
 * @id: ID of the command.
 * @param1: Parameter one of the command.
 * @param2: Parameter two of the command.
 * @data: Extra data associated with the command, if applicable.
 *
 * This structure stores the relevant information of a command that was removed
 * from the fifo but needs to be processed at a later time.
 */
struct deferred_cmd {
        struct list_head list_node;
        uint16_t id;
        uint16_t param1;
        uint32_t param2;
        void *data;
};

static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
                                      const struct glink_core_version *version,
                                      uint32_t features);
static void register_debugfs_info(struct edge_info *einfo);

static struct edge_info *edge_infos[NUM_SMEM_SUBSYSTEMS];
static DEFINE_MUTEX(probe_lock);
static struct glink_core_version versions[] = {
        {1, TRACER_PKT_FEATURE, negotiate_features_v1},
};

#define SMEM_IPC_LOG(einfo, str, id, param1, param2) do { \
        if ((glink_xprt_debug_mask & QCOM_GLINK_DEBUG_ENABLE) \
            && (einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE)) \
                ipc_log_string(einfo->log_ctx, \
                               "%s: Rx:%x:%x Tx:%x:%x Cmd:%x P1:%x P2:%x\n", \
                               str, einfo->rx_ch_desc->read_index, \
                               einfo->rx_ch_desc->write_index, \
                               einfo->tx_ch_desc->read_index, \
                               einfo->tx_ch_desc->write_index, \
                               id, param1, param2); \
} while (0)

enum {
        QCOM_GLINK_DEBUG_ENABLE = 1U << 0,
        QCOM_GLINK_DEBUG_DISABLE = 1U << 1,
};

static unsigned int glink_xprt_debug_mask = QCOM_GLINK_DEBUG_ENABLE;
module_param_named(debug_mask, glink_xprt_debug_mask, uint, 0664);

/**
 * send_irq() - send an irq to a remote entity as an event signal
 * @einfo: Which remote entity that should receive the irq.
 */
static void send_irq(struct edge_info *einfo)
{
        /*
         * Any data associated with this event must be visible to the remote
         * before the interrupt is triggered.
         */
        einfo->readback = einfo->tx_ch_desc->write_index;
        wmb();
        writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
        if (einfo->remote_proc_id != SMEM_SPSS)
                writel_relaxed(0, einfo->out_irq_reg);
        einfo->tx_irq_count++;
}

/**
 * read_from_fifo() - memcpy from fifo memory
 * @dest: Destination address.
 * @src: Source address.
 * @num_bytes: Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *read_from_fifo(void *dest, const void *src, size_t num_bytes)
{
        memcpy_fromio(dest, src, num_bytes);
        return dest;
}

/**
 * write_to_fifo() - memcpy to fifo memory
 * @dest: Destination address.
 * @src: Source address.
 * @num_bytes: Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *write_to_fifo(void *dest, const void *src, size_t num_bytes)
{
        memcpy_toio(dest, src, num_bytes);
        return dest;
}

/**
 * memcpy32_toio() - memcpy to word access only memory
 * @dest: Destination address.
 * @src: Source address.
 * @num_bytes: Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes)
{
        uint32_t *dest_local = (uint32_t *)dest;
        uint32_t *src_local = (uint32_t *)src;

        if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
                return ERR_PTR(-EINVAL);
        if (WARN_ON(!dest_local ||
                    ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
                return ERR_PTR(-EINVAL);
        if (WARN_ON(!src_local ||
                    ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
                return ERR_PTR(-EINVAL);

        num_bytes /= sizeof(uint32_t);
        while (num_bytes--)
                __raw_writel_no_log(*src_local++, dest_local++);

        return dest;
}

/**
 * memcpy32_fromio() - memcpy from word access only memory
 * @dest: Destination address.
 * @src: Source address.
 * @num_bytes: Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes)
{
        uint32_t *dest_local = (uint32_t *)dest;
        uint32_t *src_local = (uint32_t *)src;

        if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
                return ERR_PTR(-EINVAL);
        if (WARN_ON(!dest_local ||
                    ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
                return ERR_PTR(-EINVAL);
        if (WARN_ON(!src_local ||
                    ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
                return ERR_PTR(-EINVAL);

        num_bytes /= sizeof(uint32_t);
        while (num_bytes--)
                *dest_local++ = __raw_readl_no_log(src_local++);

        return dest;
}

/**
 * fifo_read_avail() - how many bytes are available to be read from an edge
 * @einfo: The concerned edge to query.
 *
 * Return: The number of bytes available to be read from the edge.
 */
static uint32_t fifo_read_avail(struct edge_info *einfo)
{
        uint32_t read_index = einfo->rx_ch_desc->read_index;
        uint32_t write_index = einfo->rx_ch_desc->write_index;
        uint32_t fifo_size = einfo->rx_fifo_size;
        uint32_t bytes_avail;

        bytes_avail = write_index - read_index;
        if (write_index < read_index)
                /*
                 * Case: W < R - Write has wrapped
                 * --------------------------------
                 * In this case, the write operation has wrapped past the end
                 * of the FIFO which means that now calculating the amount of
                 * data in the FIFO results in a negative number. This can be
                 * easily fixed by adding the fifo_size to the value. Even
                 * though the values are unsigned, subtraction is always done
                 * using 2's complement which means that the result will still
                 * be correct once the FIFO size has been added to the negative
                 * result.
                 *
                 * Example:
                 * '-' = data in fifo
                 * '.' = empty
                 *
                 *          0         1
                 *          0123456789012345
                 *          |-----w.....r----|
                 *          0                N
                 *
                 * write = 5 = 101b
                 * read = 11 = 1011b
                 * Data in FIFO
                 *   (write - read) + fifo_size = (101b - 1011b) + 10000b
                 *                 = 11111010b + 10000b = 1010b = 10
                 */
                bytes_avail += fifo_size;

        return bytes_avail;
}

/**
 * fifo_write_avail() - how many bytes can be written to the edge
 * @einfo: The concerned edge to query.
 *
 * Calculates the number of bytes that can be transmitted at this time.
 * Automatically reserves some space to maintain alignment when the fifo is
 * completely full, and reserves space so that the flush command can always be
 * transmitted when needed.
 *
 * Return: The number of bytes that can currently be written to the edge.
 */
static uint32_t fifo_write_avail(struct edge_info *einfo)
{
        uint32_t read_index = einfo->tx_ch_desc->read_index;
        uint32_t write_index = einfo->tx_ch_desc->write_index;
        uint32_t fifo_size = einfo->tx_fifo_size;
        uint32_t bytes_avail = read_index - write_index;

        if (read_index <= write_index)
                bytes_avail += fifo_size;
        if (bytes_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
                bytes_avail = 0;
        else
                bytes_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

        return bytes_avail;
}
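
/*
 * Worked example with a hypothetical 1024-byte tx fifo: when
 * read_index == write_index the raw space is 1024 bytes, from which
 * FIFO_FULL_RESERVE (8) and TX_BLOCKED_CMD_RESERVE (8) are deducted,
 * so at most 1008 bytes may be queued before fifo_tx() has to block.
 */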

/**
 * fifo_read() - read data from an edge
 * @einfo: The concerned edge to read from.
 * @_data: Buffer to copy the read data into.
 * @len: The amount of data to read in bytes.
 *
 * Return: The number of bytes read.
 */
static int fifo_read(struct edge_info *einfo, void *_data, int len)
{
        void *ptr;
        void *ret;
        void *data = _data;
        int orig_len = len;
        uint32_t read_index = einfo->rx_ch_desc->read_index;
        uint32_t write_index = einfo->rx_ch_desc->write_index;
        uint32_t fifo_size = einfo->rx_fifo_size;
        uint32_t n;

        if (read_index >= fifo_size || write_index >= fifo_size) {
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        while (len) {
                ptr = einfo->rx_fifo + read_index;
                if (read_index <= write_index)
                        n = write_index - read_index;
                else
                        n = fifo_size - read_index;

                if (n == 0)
                        break;
                if (n > len)
                        n = len;

                ret = einfo->read_from_fifo(data, ptr, n);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);

                data += n;
                len -= n;
                read_index += n;
                if (read_index >= fifo_size)
                        read_index -= fifo_size;
        }
        einfo->rx_ch_desc->read_index = read_index;

        return orig_len - len;
}
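
/*
 * A read that crosses the end of the fifo is split into two copies by
 * the loop above: the tail of the fifo first, then the head. fifo_read()
 * does not serialize callers; rx processing runs under rx_lock (see
 * __rx_worker()).
 */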

/**
 * fifo_write_body() - Copy transmit data into an edge
 * @einfo: The concerned edge to copy into.
 * @_data: Buffer of data to copy from.
 * @len: Size of data to copy in bytes.
 * @write_index: Index into the channel where the data should be copied.
 *
 * Return: Number of bytes remaining to be copied into the edge.
 */
static int fifo_write_body(struct edge_info *einfo, const void *_data,
                           int len, uint32_t *write_index)
{
        void *ptr;
        void *ret;
        const void *data = _data;
        uint32_t read_index = einfo->tx_ch_desc->read_index;
        uint32_t fifo_size = einfo->tx_fifo_size;
        uint32_t n;

        if (read_index >= fifo_size || *write_index >= fifo_size) {
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        while (len) {
                ptr = einfo->tx_fifo + *write_index;
                if (*write_index < read_index) {
                        n = read_index - *write_index - FIFO_FULL_RESERVE;
                } else {
                        if (read_index < FIFO_FULL_RESERVE)
                                n = fifo_size + read_index - *write_index -
                                        FIFO_FULL_RESERVE;
                        else
                                n = fifo_size - *write_index;
                }

                if (n == 0)
                        break;
                if (n > len)
                        n = len;

                ret = einfo->write_to_fifo(ptr, data, n);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);

                data += n;
                len -= n;
                *write_index += n;
                if (*write_index >= fifo_size)
                        *write_index -= fifo_size;
        }
        return len;
}

/**
 * fifo_write() - Write data into an edge
 * @einfo: The concerned edge to write to.
 * @data: Buffer of data to write.
 * @len: Length of data to write, in bytes.
 *
 * Wrapper around fifo_write_body() to manage additional details that are
 * necessary for a complete write event. Does not manage concurrency. Clients
 * should use fifo_write_avail() to check if there is sufficient space before
 * calling fifo_write().
 *
 * Return: Number of bytes written to the edge.
 */
static int fifo_write(struct edge_info *einfo, const void *data, int len)
{
        int orig_len = len;
        uint32_t write_index = einfo->tx_ch_desc->write_index;

        len = fifo_write_body(einfo, data, len, &write_index);
        if (unlikely(len < 0))
                return len;

        /*
         * All data writes need to be flushed to memory before the write index
         * is updated. This protects against a race condition where the remote
         * reads stale data because the write index was written before the data.
         */
        wmb();
        einfo->tx_ch_desc->write_index = write_index;
        send_irq(einfo);

        return orig_len - len;
}

/**
 * fifo_write_complex() - writes a transaction of multiple buffers to an edge
 * @einfo: The concerned edge to write to.
 * @data1: The first buffer of data to write.
 * @len1: The length of the first buffer in bytes.
 * @data2: The second buffer of data to write.
 * @len2: The length of the second buffer in bytes.
 * @data3: The third buffer of data to write.
 * @len3: The length of the third buffer in bytes.
 *
 * A variant of fifo_write() which optimizes the usecase found in tx(). The
 * remote side expects all or none of the transmitted data to be available.
 * This prevents the tx() usecase from calling fifo_write() multiple times. The
 * alternative would be an allocation and additional memcpy to create a buffer
 * to copy all the data segments into one location before calling fifo_write().
 *
 * Return: Number of bytes written to the edge.
 */
static int fifo_write_complex(struct edge_info *einfo,
                              const void *data1, int len1,
                              const void *data2, int len2,
                              const void *data3, int len3)
{
        int orig_len = len1 + len2 + len3;
        uint32_t write_index = einfo->tx_ch_desc->write_index;

        len1 = fifo_write_body(einfo, data1, len1, &write_index);
        if (unlikely(len1 < 0))
                return len1;
        len2 = fifo_write_body(einfo, data2, len2, &write_index);
        if (unlikely(len2 < 0))
                return len2;
        len3 = fifo_write_body(einfo, data3, len3, &write_index);
        if (unlikely(len3 < 0))
                return len3;

        /*
         * All data writes need to be flushed to memory before the write index
         * is updated. This protects against a race condition where the remote
         * reads stale data because the write index was written before the data.
         */
        wmb();
        einfo->tx_ch_desc->write_index = write_index;
        send_irq(einfo);

        return orig_len - len1 - len2 - len3;
}
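
/*
 * Illustrative use, a sketch of the tx() usecase named above (argument
 * names here are hypothetical): a data command is sent as one atomic
 * transaction of header, payload fragment and alignment padding,
 *
 *        fifo_write_complex(einfo, &cmd, sizeof(cmd),
 *                           data, len, padding, pad_len);
 *
 * so the remote side never observes a header without its payload.
 */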

/**
 * send_tx_blocked_signal() - send the flush command as we are blocked from tx
 * @einfo: The concerned edge which is blocked.
 *
 * Used to send a signal to the remote side that we have no more space to
 * transmit data and therefore need the remote side to signal us when they have
 * cleared some space by reading some data. This function relies upon the
 * assumption that fifo_write_avail() will reserve some space so that the flush
 * signal command can always be put into the transmit fifo, even when "everyone"
 * else thinks that the transmit fifo is truly full. This function assumes
 * that it is called with the write_lock already locked.
 */
static void send_tx_blocked_signal(struct edge_info *einfo)
{
        struct read_notif_request {
                uint16_t cmd;
                uint16_t reserved;
                uint32_t reserved2;
        };
        struct read_notif_request read_notif_req;

        read_notif_req.cmd = READ_NOTIF_CMD;
        read_notif_req.reserved = 0;
        read_notif_req.reserved2 = 0;

        SMEM_IPC_LOG(einfo, __func__, READ_NOTIF_CMD, 0, 0);
        if (!einfo->tx_blocked_signal_sent) {
                einfo->tx_blocked_signal_sent = true;
                fifo_write(einfo, &read_notif_req, sizeof(read_notif_req));
        }
}
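
/*
 * The 8-byte read_notif_request matches TX_BLOCKED_CMD_RESERVE, the
 * space fifo_write_avail() always holds back, so this command can be
 * written even when writers are told the fifo is full.
 */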

/**
 * fifo_tx() - transmit data on an edge
 * @einfo: The concerned edge to transmit on.
 * @data: Buffer of data to transmit.
 * @len: Length of data to transmit in bytes.
 *
 * This helper function is the preferred interface to fifo_write() and should
 * be used in the normal case for transmitting entities. fifo_tx() will block
 * until there is sufficient room to transmit the requested amount of data.
 * fifo_tx() will manage any concurrency between multiple transmitters on a
 * channel.
 *
 * Return: Number of bytes transmitted.
 */
static int fifo_tx(struct edge_info *einfo, const void *data, int len)
{
        unsigned long flags;
        int ret;

        DEFINE_WAIT(wait);

        spin_lock_irqsave(&einfo->write_lock, flags);
        while (fifo_write_avail(einfo) < len) {
                send_tx_blocked_signal(einfo);
                prepare_to_wait(&einfo->tx_blocked_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (fifo_write_avail(einfo) < len && !einfo->in_ssr) {
                        spin_unlock_irqrestore(&einfo->write_lock, flags);
                        schedule();
                        spin_lock_irqsave(&einfo->write_lock, flags);
                }
                finish_wait(&einfo->tx_blocked_queue, &wait);
                if (einfo->in_ssr) {
                        spin_unlock_irqrestore(&einfo->write_lock, flags);
                        return -EFAULT;
                }
        }
        ret = fifo_write(einfo, data, len);
        spin_unlock_irqrestore(&einfo->write_lock, flags);

        return ret;
}
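
/*
 * Example (sketch): the tx_cmd_* helpers below all funnel through this
 * function, e.g. for a version command,
 *
 *        struct command cmd = { VERSION_CMD, version, features };
 *        fifo_tx(einfo, &cmd, sizeof(cmd));
 *
 * where the caller sleeps on tx_blocked_queue until the remote side
 * signals that it has drained some data.
 */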

/**
 * process_rx_data() - process received data from an edge
 * @einfo: The edge the data was received on.
 * @cmd_id: ID to specify the type of data.
 * @rcid: The remote channel id associated with the data.
 * @intent_id: The intent the data should be put in.
 */
static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
                            uint32_t rcid, uint32_t intent_id)
{
        struct command {
                uint32_t frag_size;
                uint32_t size_remaining;
        };
        struct command cmd;
        struct glink_core_rx_intent *intent;
        char trash[FIFO_ALIGNMENT];
        int alignment;
        bool err = false;

        fifo_read(einfo, &cmd, sizeof(cmd));

        intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
                                &einfo->xprt_if, rcid, intent_id);
        if (intent == NULL) {
                GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
                          intent_id);
                err = true;
        } else if (intent->data == NULL) {
                if (einfo->intentless) {
                        intent->data = kmalloc(cmd.frag_size,
                                               __GFP_ATOMIC | __GFP_HIGH);
                        if (!intent->data) {
                                err = true;
                                GLINK_ERR(
                                        "%s: atomic alloc fail ch %d liid %d size %d\n",
                                        __func__, rcid, intent_id,
                                        cmd.frag_size);
                        } else {
                                intent->intent_size = cmd.frag_size;
                        }
                } else {
                        GLINK_ERR(
                                "%s: intent for ch %d liid %d has no data buff\n",
                                __func__, rcid, intent_id);
                        err = true;
                }
        }

        if (!err &&
            (intent->intent_size - intent->write_offset < cmd.frag_size ||
             intent->write_offset + cmd.size_remaining > intent->intent_size)) {
                GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
                          __func__,
                          cmd.frag_size,
                          cmd.size_remaining,
                          "will overflow ch",
                          rcid,
                          "intent",
                          intent_id);
                err = true;
        }

        if (err) {
                alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
                alignment -= cmd.frag_size;
                while (cmd.frag_size) {
                        if (cmd.frag_size > FIFO_ALIGNMENT) {
                                fifo_read(einfo, trash, FIFO_ALIGNMENT);
                                cmd.frag_size -= FIFO_ALIGNMENT;
                        } else {
                                fifo_read(einfo, trash, cmd.frag_size);
                                cmd.frag_size = 0;
                        }
                }
                if (alignment)
                        fifo_read(einfo, trash, alignment);
                return;
        }
        fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size);
        intent->write_offset += cmd.frag_size;
        intent->pkt_size += cmd.frag_size;

        alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
        alignment -= cmd.frag_size;
        if (alignment)
                fifo_read(einfo, trash, alignment);

        if (unlikely((cmd_id == TRACER_PKT_CMD ||
                      cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) {
                tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
                intent->tracer_pkt = true;
        }

        einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
                                                         rcid,
                                                         intent,
                                                         cmd.size_remaining ?
                                                                false : true);
}
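
/*
 * Fragments are padded to FIFO_ALIGNMENT on the wire, which is why both
 * the error path and the success path above read and discard up to
 * FIFO_ALIGNMENT - 1 trailing pad bytes into the trash buffer.
 */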

/**
 * queue_cmd() - queue a deferred command for later processing
 * @einfo: Edge to queue commands on.
 * @cmd: Command to queue.
 * @data: Command specific data to queue with the command.
 *
 * Return: True if queuing was successful, false otherwise.
 */
static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data)
{
        struct command {
                uint16_t id;
                uint16_t param1;
                uint32_t param2;
        };
        struct command *_cmd = cmd;
        struct deferred_cmd *d_cmd;

        d_cmd = kmalloc(sizeof(*d_cmd), GFP_ATOMIC);
        if (!d_cmd) {
                GLINK_ERR("%s: Discarding cmd %d\n", __func__, _cmd->id);
                return false;
        }
        d_cmd->id = _cmd->id;
        d_cmd->param1 = _cmd->param1;
        d_cmd->param2 = _cmd->param2;
        d_cmd->data = data;
        list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds);
        einfo->deferred_cmds_cnt++;
        kthread_queue_work(&einfo->kworker, &einfo->kwork);
        return true;
}
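
/*
 * Deferral flow: irq_handler() calls __rx_worker(einfo, true), which
 * uses queue_cmd() for anything that cannot run in atomic context;
 * kthread_queue_work() then has rx_worker() call __rx_worker(einfo,
 * false), which drains einfo->deferred_cmds before reading the fifo.
 */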

/**
 * get_rx_fifo() - Find the rx fifo for an edge
 * @einfo: Edge to find the fifo for.
 *
 * Return: True if fifo was found, false otherwise.
 */
static bool get_rx_fifo(struct edge_info *einfo)
{
        if (einfo->mailbox) {
                einfo->rx_fifo = &einfo->mailbox->fifo[einfo->mailbox->tx_size];
                einfo->rx_fifo_size = einfo->mailbox->rx_size;
        } else {
                einfo->rx_fifo = smem_get_entry(SMEM_GLINK_NATIVE_XPRT_FIFO_1,
                                                &einfo->rx_fifo_size,
                                                einfo->remote_proc_id,
                                                SMEM_ITEM_CACHED_FLAG);
                if (!einfo->rx_fifo)
                        einfo->rx_fifo = smem_get_entry(
                                                SMEM_GLINK_NATIVE_XPRT_FIFO_1,
                                                &einfo->rx_fifo_size,
                                                einfo->remote_proc_id,
                                                0);
                if (!einfo->rx_fifo)
                        return false;
        }
        return true;
}

/**
 * tx_wakeup_worker() - wake up threads blocked on tx for an edge
 * @einfo: The edge to process the tx wakeup on.
 */
static void tx_wakeup_worker(struct edge_info *einfo)
{
        struct glink_transport_if xprt_if = einfo->xprt_if;
        bool trigger_wakeup = false;
        bool trigger_resume = false;
        unsigned long flags;

        if (einfo->in_ssr)
                return;

        spin_lock_irqsave(&einfo->write_lock, flags);
        if (fifo_write_avail(einfo)) {
                if (einfo->tx_blocked_signal_sent)
                        einfo->tx_blocked_signal_sent = false;
                if (einfo->tx_resume_needed) {
                        einfo->tx_resume_needed = false;
                        trigger_resume = true;
                }
        }
        if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting? */
                trigger_wakeup = true;
        }
        spin_unlock_irqrestore(&einfo->write_lock, flags);

        if (trigger_wakeup)
                wake_up_all(&einfo->tx_blocked_queue);
        if (trigger_resume)
                xprt_if.glink_core_if_ptr->tx_resume(&xprt_if);
}

/**
 * __rx_worker() - process received commands on a specific edge
 * @einfo: Edge to process commands on.
 * @atomic_ctx: Indicates if the caller is in atomic context and requires any
 *              non-atomic operations to be deferred.
 */
static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
{
        struct command {
                uint16_t id;
                uint16_t param1;
                uint32_t param2;
        };
        struct intent_desc {
                uint32_t size;
                uint32_t id;
        };
        struct command cmd;
        struct intent_desc intent;
        struct intent_desc *intents;
        int i;
        bool granted;
        unsigned long flags;
        int rcu_id;
        uint16_t rcid;
        uint32_t name_len;
        uint32_t len;
        char *name;
        char trash[FIFO_ALIGNMENT];
        struct deferred_cmd *d_cmd;
        void *cmd_data;

        rcu_id = srcu_read_lock(&einfo->use_ref);

        if (einfo->in_ssr) {
                srcu_read_unlock(&einfo->use_ref, rcu_id);
                return;
        }

        if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
            (waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting? */
                tx_wakeup_worker(einfo);

        /*
         * Access to the fifo needs to be synchronized, however only the calls
         * into the core from process_rx_data() are compatible with an atomic
         * processing context. For everything else, we need to do all the fifo
         * processing, then unlock the lock for the call into the core. Data
         * in the fifo is allowed to be processed immediately instead of being
         * ordered with the commands because the channel open process prevents
         * intents from being queued (which prevents data from being sent) until
         * all the channel open commands are processed by the core, thus
         * eliminating a race.
         */
        spin_lock_irqsave(&einfo->rx_lock, flags);
        while (fifo_read_avail(einfo) ||
               (!atomic_ctx && !list_empty(&einfo->deferred_cmds))) {
                if (einfo->in_ssr)
                        break;

                if (atomic_ctx && !einfo->intentless &&
                    einfo->deferred_cmds_cnt >= DEFERRED_CMDS_THRESHOLD)
                        break;

                if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
                        d_cmd = list_first_entry(&einfo->deferred_cmds,
                                                 struct deferred_cmd,
                                                 list_node);
                        list_del(&d_cmd->list_node);
                        einfo->deferred_cmds_cnt--;
                        cmd.id = d_cmd->id;
                        cmd.param1 = d_cmd->param1;
                        cmd.param2 = d_cmd->param2;
                        cmd_data = d_cmd->data;
                        kfree(d_cmd);
                        SMEM_IPC_LOG(einfo, "kthread", cmd.id, cmd.param1,
                                     cmd.param2);
                } else {
                        fifo_read(einfo, &cmd, sizeof(cmd));
                        SMEM_IPC_LOG(einfo, "IRQ", cmd.id, cmd.param1,
                                     cmd.param2);
                        cmd_data = NULL;
                }
                switch (cmd.id) {
                case VERSION_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        cmd.param2);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case VERSION_ACK_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        cmd.param2);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case OPEN_CMD:
                        rcid = cmd.param1;
                        name_len = cmd.param2;

                        if (cmd_data) {
                                name = cmd_data;
                        } else {
                                len = ALIGN(name_len, FIFO_ALIGNMENT);
                                name = kmalloc(len, GFP_ATOMIC);
                                if (!name) {
                                        pr_err("No memory available to rx ch open cmd name. Discarding cmd.\n");
                                        while (len) {
                                                fifo_read(einfo, trash,
                                                          FIFO_ALIGNMENT);
                                                len -= FIFO_ALIGNMENT;
                                        }
                                        break;
                                }
                                fifo_read(einfo, name, len);
                        }
                        if (atomic_ctx) {
                                if (!queue_cmd(einfo, &cmd, name))
                                        kfree(name);
                                break;
                        }

                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
                                                        &einfo->xprt_if,
                                                        rcid,
                                                        name,
                                                        SMEM_XPRT_ID);
                        kfree(name);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case CLOSE_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->
                                                rx_cmd_ch_remote_close(
                                                        &einfo->xprt_if,
                                                        cmd.param1);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case OPEN_ACK_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        SMEM_XPRT_ID);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case RX_INTENT_CMD:
                        /*
                         * One intent listed with this command. This is the
                         * expected case and can be optimized over the general
                         * case of an array of intents.
                         */
                        if (cmd.param2 == 1) {
                                if (cmd_data) {
                                        intent.id = ((struct intent_desc *)
                                                                cmd_data)->id;
                                        intent.size = ((struct intent_desc *)
                                                                cmd_data)->size;
                                        kfree(cmd_data);
                                } else {
                                        fifo_read(einfo, &intent,
                                                  sizeof(intent));
                                }
                                if (atomic_ctx) {
                                        cmd_data = kmalloc(sizeof(intent),
                                                           GFP_ATOMIC);
                                        if (!cmd_data) {
                                                GLINK_ERR(
                                                        "%s: dropping cmd %d\n",
                                                        __func__, cmd.id);
                                                break;
                                        }
                                        ((struct intent_desc *)cmd_data)->id =
                                                                intent.id;
                                        ((struct intent_desc *)cmd_data)->size =
                                                                intent.size;
                                        if (!queue_cmd(einfo, &cmd, cmd_data))
                                                kfree(cmd_data);
                                        break;
                                }
                                spin_unlock_irqrestore(&einfo->rx_lock, flags);
                                einfo->xprt_if.glink_core_if_ptr->
                                                rx_cmd_remote_rx_intent_put(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        intent.id,
                                                        intent.size);
                                spin_lock_irqsave(&einfo->rx_lock, flags);
                                break;
                        }

                        /* Array of intents to process */
                        if (cmd_data) {
                                intents = cmd_data;
                        } else {
                                intents = kmalloc_array(cmd.param2,
                                                        sizeof(*intents),
                                                        GFP_ATOMIC);
                                if (!intents) {
                                        for (i = 0; i < cmd.param2; ++i)
                                                fifo_read(einfo, &intent,
                                                          sizeof(intent));
                                        break;
                                }
                                fifo_read(einfo, intents,
                                          sizeof(*intents) * cmd.param2);
                        }
                        if (atomic_ctx) {
                                if (!queue_cmd(einfo, &cmd, intents))
                                        kfree(intents);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        for (i = 0; i < cmd.param2; ++i) {
                                einfo->xprt_if.glink_core_if_ptr->
                                                rx_cmd_remote_rx_intent_put(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        intents[i].id,
                                                        intents[i].size);
                        }
                        kfree(intents);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case RX_DONE_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        cmd.param2,
                                                        false);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case RX_INTENT_REQ_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->
                                                rx_cmd_remote_rx_intent_req(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        cmd.param2);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case RX_INTENT_REQ_ACK_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        granted = false;
                        if (cmd.param2 == 1)
                                granted = true;
                        einfo->xprt_if.glink_core_if_ptr->
                                                rx_cmd_rx_intent_req_ack(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        granted);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case TX_DATA_CMD:
                case TX_DATA_CONT_CMD:
                case TRACER_PKT_CMD:
                case TRACER_PKT_CONT_CMD:
                        process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2);
                        break;
                case CLOSE_ACK_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
                                                        &einfo->xprt_if,
                                                        cmd.param1);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case READ_NOTIF_CMD:
                        send_irq(einfo);
                        break;
                case SIGNALS_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        cmd.param2);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                case RX_DONE_W_REUSE_CMD:
                        if (atomic_ctx) {
                                queue_cmd(einfo, &cmd, NULL);
                                break;
                        }
                        spin_unlock_irqrestore(&einfo->rx_lock, flags);
                        einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
                                                        &einfo->xprt_if,
                                                        cmd.param1,
                                                        cmd.param2,
                                                        true);
                        spin_lock_irqsave(&einfo->rx_lock, flags);
                        break;
                default:
                        pr_err("Unrecognized command: %d\n", cmd.id);
                        break;
                }
        }
        spin_unlock_irqrestore(&einfo->rx_lock, flags);
        srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * rx_worker() - worker function to process received commands
 * @work: kwork associated with the edge to process commands on.
 */
static void rx_worker(struct kthread_work *work)
{
        struct edge_info *einfo;

        einfo = container_of(work, struct edge_info, kwork);
        __rx_worker(einfo, false);
}
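
/**
 * irq_handler() - handler for the incoming irq from the remote side
 * @irq: The irq line that fired.
 * @priv: The edge_info the handler was registered with.
 *
 * Resets the rx irq line where applicable, then processes as much of the
 * rx fifo as is safe in atomic context, deferring the rest to rx_worker().
 */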
irqreturn_t irq_handler(int irq, void *priv)
{
        struct edge_info *einfo = (struct edge_info *)priv;

        if (einfo->rx_reset_reg)
                writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);

        __rx_worker(einfo, true);
        einfo->rx_irq_count++;

        return IRQ_HANDLED;
}

/**
 * tx_cmd_version() - convert a version cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @version: The version number to encode.
 * @features: The features information to encode.
 */
static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
                           uint32_t features)
{
        struct command {
                uint16_t id;
                uint16_t version;
                uint32_t features;
        };
        struct command cmd;
        struct edge_info *einfo;
        int rcu_id;

        einfo = container_of(if_ptr, struct edge_info, xprt_if);
        rcu_id = srcu_read_lock(&einfo->use_ref);
        if (einfo->in_ssr) {
                srcu_read_unlock(&einfo->use_ref, rcu_id);
                return;
        }
        cmd.id = VERSION_CMD;
        cmd.version = version;
        cmd.features = features;

        SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.version, cmd.features);
        fifo_tx(einfo, &cmd, sizeof(cmd));
        srcu_read_unlock(&einfo->use_ref, rcu_id);
}
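
/*
 * All the tx_cmd_* helpers share the guard pattern above: take an SRCU
 * read lock on use_ref, bail out if the edge is in SSR, transmit, then
 * drop the lock. SSR cleanup synchronizes on use_ref to know that no
 * transmitter is still inside the transport.
 */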

/**
 * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @version: The version number to encode.
 * @features: The features information to encode.
 */
static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
                               uint32_t version,
                               uint32_t features)
{
        struct command {
                uint16_t id;
                uint16_t version;
                uint32_t features;
        };
        struct command cmd;
        struct edge_info *einfo;
        int rcu_id;

        einfo = container_of(if_ptr, struct edge_info, xprt_if);
        rcu_id = srcu_read_lock(&einfo->use_ref);
        if (einfo->in_ssr) {
                srcu_read_unlock(&einfo->use_ref, rcu_id);
                return;
        }
        cmd.id = VERSION_ACK_CMD;
        cmd.version = version;
        cmd.features = features;

        SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.version, cmd.features);
        fifo_tx(einfo, &cmd, sizeof(cmd));
        srcu_read_unlock(&einfo->use_ref, rcu_id);
}
/**
 * set_version() - activate a negotiated version and feature set
 * @if_ptr: The transport to configure.
 * @version: The version to use.
 * @features: The features to use.
 *
 * Return: The supported capabilities of the transport.
 */
static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
			    uint32_t features)
{
	struct edge_info *einfo;
	uint32_t ret;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return 0;
	}

	ret = einfo->intentless ?
		GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS;

	if (features & TRACER_PKT_FEATURE)
		ret |= GCAP_TRACER_PKT;

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return ret;
}

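/*
 * Worked example of the capability logic above (illustrative only): an
 * intentless edge negotiated with TRACER_PKT_FEATURE requested returns
 * GCAP_INTENTLESS | GCAP_SIGNALS | GCAP_TRACER_PKT, while an intent-based
 * edge without that feature returns just GCAP_SIGNALS.
 */
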
/**
 * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @name: The channel name to encode.
 * @req_xprt: The transport the core would like to migrate this channel to.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
			  const char *name, uint16_t req_xprt)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t length;
	};
	struct command cmd;
	struct edge_info *einfo;
	uint32_t buf_size;
	void *buf;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = OPEN_CMD;
	cmd.lcid = lcid;
	cmd.length = strlen(name) + 1;
	buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		GLINK_ERR("%s: malloc fail for %u size buf\n",
			  __func__, buf_size);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -ENOMEM;
	}
	memcpy(buf, &cmd, sizeof(cmd));
	memcpy(buf + sizeof(cmd), name, cmd.length);

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.length);
	fifo_tx(einfo, buf, buf_size);
	kfree(buf);
	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

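/*
 * Sketch of the open command as assembled above, e.g. for the hypothetical
 * channel name "loopback" (cmd.length = 9 including the NUL; kzalloc()
 * provides the zero padding out to FIFO_ALIGNMENT):
 *
 *   | id=OPEN_CMD | lcid | length=9 | 'l''o''o''p''b''a''c''k''\0' | pad... |
 */
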
/**
 * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = CLOSE_CMD;
	cmd.lcid = lcid;
	cmd.reserved = 0;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.reserved);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
 *				 and transmit
 * @if_ptr: The transport to transmit on.
 * @rcid: The remote channel id to encode.
 * @xprt_resp: The response to a transport migration request.
 */
static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
				      uint32_t rcid, uint16_t xprt_resp)
{
	struct command {
		uint16_t id;
		uint16_t rcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = OPEN_ACK_CMD;
	cmd.rcid = rcid;
	cmd.reserved = 0;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.rcid, cmd.reserved);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire
 *				  format and transmit
 * @if_ptr: The transport to transmit on.
 * @rcid: The remote channel id to encode.
 */
static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
				       uint32_t rcid)
{
	struct command {
		uint16_t id;
		uint16_t rcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = CLOSE_ACK_CMD;
	cmd.rcid = rcid;
	cmd.reserved = 0;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.rcid, cmd.reserved);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * subsys_up() - process a subsystem up notification
 * @if_ptr: The transport which is up.
 */
static void subsys_up(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);
	if (!einfo->rx_fifo) {
		if (!get_rx_fifo(einfo))
			return;
		einfo->in_ssr = false;
		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
	}
}

/**
 * ssr() - process a subsystem restart notification of a transport
 * @if_ptr: The transport to restart.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int ssr(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;
	struct deferred_cmd *cmd;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	BUG_ON(einfo->remote_proc_id == SMEM_RPM);

	einfo->in_ssr = true;
	wake_up_all(&einfo->tx_blocked_queue);

	synchronize_srcu(&einfo->use_ref);

	while (!list_empty(&einfo->deferred_cmds)) {
		cmd = list_first_entry(&einfo->deferred_cmds,
				       struct deferred_cmd, list_node);
		list_del(&cmd->list_node);
		kfree(cmd->data);
		kfree(cmd);
	}

	einfo->tx_resume_needed = false;
	einfo->tx_blocked_signal_sent = false;
	einfo->rx_fifo = NULL;
	einfo->rx_fifo_size = 0;
	einfo->tx_ch_desc->write_index = 0;
	einfo->rx_ch_desc->read_index = 0;
	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);

	return 0;
}

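/*
 * Ordering note on ssr() above: in_ssr is set and blocked writers are woken
 * before synchronize_srcu() runs, so every use_ref reader that could still
 * touch the FIFOs has drained before the deferred commands are freed and the
 * channel descriptors are reset.
 */
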
/**
 * wait_link_down() - Check status of read/write indices
 * @if_ptr: The transport to check.
 *
 * Return: 1 if indices are all zero, 0 otherwise.
 */
int wait_link_down(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->tx_ch_desc->write_index == 0 &&
	    einfo->tx_ch_desc->read_index == 0 &&
	    einfo->rx_ch_desc->write_index == 0 &&
	    einfo->rx_ch_desc->read_index == 0)
		return 1;
	else
		return 0;
}

/**
 * allocate_rx_intent() - allocate/reserve space for RX Intent
 * @if_ptr: The transport the intent is associated with.
 * @size: size of intent.
 * @intent: Pointer to the intent structure.
 *
 * The transport creates a linear buffer, so "data" is assigned the allocated
 * buffer and "iovec" is assigned the intent itself, allowing the data to be
 * passed to a client that only accepts vector buffers.
 * Note that returning NULL for the pointer is valid (it means that space has
 * been reserved, but the actual pointer will be provided later).
 *
 * Return: 0 on success or standard Linux error code.
 */
static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
			      struct glink_core_rx_intent *intent)
{
	void *t;

	t = kmalloc(size, GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	intent->data = t;
	intent->iovec = (void *)intent;
	intent->vprovider = rx_linear_vbuf_provider;
	intent->pprovider = NULL;
	return 0;
}

/**
 * deallocate_rx_intent() - deallocate space created for an RX Intent
 * @if_ptr: The transport the intent is associated with.
 * @intent: Pointer to the intent structure.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
				struct glink_core_rx_intent *intent)
{
	if (!intent || !intent->data)
		return -EINVAL;

	kfree(intent->data);
	intent->data = NULL;
	intent->iovec = NULL;
	intent->vprovider = NULL;
	return 0;
}

/**
 * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
 *			      transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @size: The intent size to encode.
 * @liid: The local intent id to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
				  uint32_t lcid, size_t size, uint32_t liid)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t count;
		uint32_t size;
		uint32_t liid;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	if (size > UINT_MAX) {
		pr_err("%s: size %zu is too large to encode\n", __func__, size);
		return -EMSGSIZE;
	}

	einfo = container_of(if_ptr, struct edge_info, xprt_if);
	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_CMD;
	cmd.lcid = lcid;
	cmd.count = 1;
	cmd.size = size;
	cmd.liid = liid;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.count);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @liid: The local intent id to encode.
 * @reuse: Reuse the consumed intent.
 */
static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
				 uint32_t lcid, uint32_t liid, bool reuse)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t liid;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);
	if (einfo->intentless)
		return;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
	cmd.lcid = lcid;
	cmd.liid = liid;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.liid);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format
 *			    and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @size: The requested intent size to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
				uint32_t lcid, size_t size)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t size;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	if (size > UINT_MAX) {
		pr_err("%s: size %zu is too large to encode\n", __func__, size);
		return -EMSGSIZE;
	}

	einfo = container_of(if_ptr, struct edge_info, xprt_if);
	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_REQ_CMD;
	cmd.lcid = lcid;
	cmd.size = size;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.size);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

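/*
 * A sketch of the intent handshake these commands implement (sequence
 * inferred from the command set, not a captured trace): the local side sends
 * RX_INTENT_REQ_CMD for a given size, the remote answers with
 * RX_INTENT_REQ_ACK_CMD (response = 1 when granted), and a subsequent
 * RX_INTENT_CMD from the remote advertises the intent the data can then be
 * transmitted against.
 */
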
/**
 * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack cmd to
 *				       wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @granted: The request response to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
					   uint32_t lcid, bool granted)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t response;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);
	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_REQ_ACK_CMD;
	cmd.lcid = lcid;
	if (granted)
		cmd.response = 1;
	else
		cmd.response = 0;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.response);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_set_sigs() - convert a signals cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @sigs: The signals to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
			   uint32_t sigs)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t sigs;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = SIGNALS_CMD;
	cmd.lcid = lcid;
	cmd.sigs = sigs;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.sigs);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * poll() - poll for data on a channel
 * @if_ptr: The transport the channel exists on.
 * @lcid: The local channel id.
 *
 * Return: 0 if no data available, 1 if data available.
 */
static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
{
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	if (fifo_read_avail(einfo)) {
		__rx_worker(einfo, true);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return 1;
	}

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

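/*
 * Note on poll() above: when data is pending, the RX FIFO is drained inline
 * via __rx_worker(einfo, true), i.e. in atomic context, instead of waiting
 * for the interrupt-driven worker to be scheduled.
 */
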
/**
 * mask_rx_irq() - mask the receive irq for a channel
 * @if_ptr: The transport the channel exists on.
 * @lcid: The local channel id for the channel.
 * @mask: True to mask the irq, false to unmask.
 * @pstruct: Platform defined structure for handling the masking.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
		       bool mask, void *pstruct)
{
	struct edge_info *einfo;
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	irq_chip = irq_get_chip(einfo->irq_line);
	if (!irq_chip) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -ENODEV;
	}

	irq_data = irq_get_irq_data(einfo->irq_line);
	if (!irq_data) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -ENODEV;
	}

	if (mask) {
		irq_chip->irq_mask(irq_data);
		einfo->irq_disabled = true;
		if (pstruct)
			irq_set_affinity(einfo->irq_line, pstruct);
	} else {
		irq_chip->irq_unmask(irq_data);
		einfo->irq_disabled = false;
	}

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_data() - convert a data/tracer_pkt to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @cmd_id: The command ID to transmit.
 * @lcid: The local channel id to encode.
 * @pctx: The data to encode.
 *
 * Return: Number of bytes written or standard Linux error code.
 */
static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
		   uint32_t lcid, struct glink_core_tx_pkt *pctx)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t riid;
		uint32_t size;
		uint32_t size_left;
	};
	struct command cmd;
	struct edge_info *einfo;
	uint32_t size;
	uint32_t zeros_size;
	const void *data_start;
	char zeros[FIFO_ALIGNMENT] = { 0 };
	unsigned long flags;
	size_t tx_size = 0;
	int rcu_id;
	int ret;

	if (pctx->size < pctx->size_remaining) {
		GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
			  __func__);
		pctx->size_remaining = pctx->size;
	}
	if (!pctx->size_remaining)
		return 0;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	if (einfo->intentless &&
	    (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EINVAL;
	}

	if (cmd_id == TX_DATA_CMD) {
		if (pctx->size_remaining == pctx->size)
			cmd.id = TX_DATA_CMD;
		else
			cmd.id = TX_DATA_CONT_CMD;
	} else {
		if (pctx->size_remaining == pctx->size)
			cmd.id = TRACER_PKT_CMD;
		else
			cmd.id = TRACER_PKT_CONT_CMD;
	}
	cmd.lcid = lcid;
	cmd.riid = pctx->riid;
	data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
				  &tx_size);
	if (!data_start) {
		GLINK_ERR("%s: invalid data_start\n", __func__);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EINVAL;
	}

	spin_lock_irqsave(&einfo->write_lock, flags);
	size = fifo_write_avail(einfo);

	/* Intentless clients expect a complete commit or instant failure */
	if (einfo->intentless && size < sizeof(cmd) + pctx->size) {
		spin_unlock_irqrestore(&einfo->write_lock, flags);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -ENOSPC;
	}

	/* Need enough space to write the command and some data */
	if (size <= sizeof(cmd)) {
		einfo->tx_resume_needed = true;
		send_tx_blocked_signal(einfo);
		spin_unlock_irqrestore(&einfo->write_lock, flags);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EAGAIN;
	}

	size -= sizeof(cmd);
	if (size > tx_size)
		size = tx_size;

	cmd.size = size;
	pctx->size_remaining -= size;
	cmd.size_left = pctx->size_remaining;
	zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size;
	if (cmd.id == TRACER_PKT_CMD)
		tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);

	ret = fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size,
				 zeros, zeros_size);
	if (ret < 0) {
		spin_unlock_irqrestore(&einfo->write_lock, flags);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return ret;
	}

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.riid);
	GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
		  "<SMEM>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
		  cmd.size_left);
	spin_unlock_irqrestore(&einfo->write_lock, flags);

	/* Fake tx_done for intentless since it's not supported over the wire */
	if (einfo->intentless) {
		spin_lock_irqsave(&einfo->rx_lock, flags);
		cmd.id = RX_DONE_CMD;
		cmd.lcid = pctx->rcid;
		queue_cmd(einfo, &cmd, NULL);
		spin_unlock_irqrestore(&einfo->rx_lock, flags);
	}

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return cmd.size;
}

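/*
 * Fragmentation sketch for tx_data() above (illustrative sizes, not a
 * trace): with the 16-byte command header, 40 bytes of FIFO space free and a
 * 64-byte packet, the first call writes
 *   TX_DATA_CMD      { size = 24, size_left = 40 }
 * and returns 24; once space frees up, a later call writes
 *   TX_DATA_CONT_CMD { size = 40, size_left = 0 }
 * with each chunk zero-padded out to FIFO_ALIGNMENT as needed.
 */
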
/**
 * tx() - convert a data transmit cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @pctx: The data to encode.
 *
 * Return: Number of bytes written or standard Linux error code.
 */
static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
	      struct glink_core_tx_pkt *pctx)
{
	return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
}

/**
 * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and
 *			 transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @pctx: The data to encode.
 *
 * Return: Number of bytes written or standard Linux error code.
 */
static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
			     struct glink_core_tx_pkt *pctx)
{
	return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
}

/**
 * get_power_vote_ramp_time() - Get the ramp time required for the power
 *				votes to be applied
 * @if_ptr: The transport interface on which power voting is requested.
 * @state: The power state for which ramp time is required.
 *
 * Return: The ramp time specific to the power state, standard error otherwise.
 */
static unsigned long get_power_vote_ramp_time(
		struct glink_transport_if *if_ptr, uint32_t state)
{
	struct edge_info *einfo;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
		return (unsigned long)ERR_PTR(-EINVAL);

	return einfo->ramp_time_us[state];
}

/**
 * power_vote() - Update the power votes to meet qos requirement
 * @if_ptr: The transport interface on which power voting is requested.
 * @state: The power state for which the voting should be done.
 *
 * Return: 0 on Success, standard error otherwise.
 */
static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
{
	return 0;
}

/**
 * power_unvote() - Remove all the power votes
 * @if_ptr: The transport interface on which power voting is requested.
 *
 * Return: 0 on Success, standard error otherwise.
 */
static int power_unvote(struct glink_transport_if *if_ptr)
{
	return 0;
}

/**
 * rx_rt_vote() - Increment the RX thread RT vote
 * @if_ptr: The transport interface on which power voting is requested.
 *
 * Return: 0 on Success, standard error otherwise.
 */
static int rx_rt_vote(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;
	struct sched_param param = { .sched_priority = 1 };
	int ret = 0;
	unsigned long flags;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);
	spin_lock_irqsave(&einfo->rt_vote_lock, flags);
	if (!einfo->rt_votes)
		ret = sched_setscheduler_nocheck(einfo->task, SCHED_FIFO,
						 &param);
	einfo->rt_votes++;
	spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
	return ret;
}

/**
 * rx_rt_unvote() - Remove an RX thread RT vote
 * @if_ptr: The transport interface on which power voting is requested.
 *
 * Return: 0 on Success, standard error otherwise.
 */
static int rx_rt_unvote(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;
	struct sched_param param = { .sched_priority = 0 };
	int ret = 0;
	unsigned long flags;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);
	spin_lock_irqsave(&einfo->rt_vote_lock, flags);
	einfo->rt_votes--;
	if (!einfo->rt_votes)
		ret = sched_setscheduler_nocheck(einfo->task, SCHED_NORMAL,
						 &param);
	spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
	return ret;
}

/**
 * negotiate_features_v1() - determine what features of a version can be used
 * @if_ptr: The transport for which features are being negotiated.
 * @version: The version negotiated.
 * @features: The set of requested features.
 *
 * Return: What set of the requested features can be supported.
 */
static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
				      const struct glink_core_version *version,
				      uint32_t features)
{
	return features & version->features;
}

/**
 * init_xprt_if() - initialize the xprt_if for an edge
 * @einfo: The edge to initialize.
 */
static void init_xprt_if(struct edge_info *einfo)
{
	einfo->xprt_if.tx_cmd_version = tx_cmd_version;
	einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
	einfo->xprt_if.set_version = set_version;
	einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
	einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
	einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
	einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
	einfo->xprt_if.ssr = ssr;
	einfo->xprt_if.subsys_up = subsys_up;
	einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
	einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
	einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
	einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
	einfo->xprt_if.tx = tx;
	einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
	einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
			tx_cmd_remote_rx_intent_req_ack;
	einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
	einfo->xprt_if.poll = poll;
	einfo->xprt_if.mask_rx_irq = mask_rx_irq;
	einfo->xprt_if.wait_link_down = wait_link_down;
	einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
	einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
	einfo->xprt_if.power_vote = power_vote;
	einfo->xprt_if.power_unvote = power_unvote;
	einfo->xprt_if.rx_rt_vote = rx_rt_vote;
	einfo->xprt_if.rx_rt_unvote = rx_rt_unvote;
}

/**
 * init_xprt_cfg() - initialize the xprt_cfg for an edge
 * @einfo: The edge to initialize.
 * @name: The name of the remote side this edge communicates to.
 */
static void init_xprt_cfg(struct edge_info *einfo, const char *name)
{
	einfo->xprt_cfg.name = XPRT_NAME;
	einfo->xprt_cfg.edge = name;
	einfo->xprt_cfg.versions = versions;
	einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
	einfo->xprt_cfg.max_cid = SZ_64K;
	einfo->xprt_cfg.max_iid = SZ_2G;
}

/**
 * parse_qos_dt_params() - Parse the power states from DT
 * @node: Reference to the device tree node for a specific edge.
 * @einfo: Edge information for the edge whose probe function called this.
 *
 * Return: 0 on success, standard error code otherwise.
 */
static int parse_qos_dt_params(struct device_node *node,
			       struct edge_info *einfo)
{
	int rc;
	int i;
	char *key;
	uint32_t *arr32;
	uint32_t num_states;

	key = "qcom,ramp-time";
	if (!of_find_property(node, key, &num_states))
		return -ENODEV;

	num_states /= sizeof(uint32_t);
	einfo->num_pw_states = num_states;

	arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
	if (!arr32)
		return -ENOMEM;

	einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
					    GFP_KERNEL);
	if (!einfo->ramp_time_us) {
		rc = -ENOMEM;
		goto mem_alloc_fail;
	}

	rc = of_property_read_u32_array(node, key, arr32, num_states);
	if (rc) {
		rc = -ENODEV;
		goto invalid_key;
	}
	for (i = 0; i < num_states; i++)
		einfo->ramp_time_us[i] = arr32[i];

	rc = 0;
	kfree(arr32);
	return rc;

invalid_key:
	kfree(einfo->ramp_time_us);
mem_alloc_fail:
	kfree(arr32);
	return rc;
}

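/*
 * Illustrative device tree property consumed by parse_qos_dt_params() above
 * (the values are hypothetical; each cell is the ramp time in microseconds
 * for one power state):
 *
 *   qcom,ramp-time = <10 20 30 40>;
 */
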
/**
 * subsys_name_to_id() - translate a subsystem name to a processor id
 * @name: The subsystem name to look up.
 *
 * Return: The processor id corresponding to @name or standard Linux error
 * code.
 */
static int subsys_name_to_id(const char *name)
{
	if (!name)
		return -ENODEV;

	if (!strcmp(name, "apss"))
		return SMEM_APPS;
	if (!strcmp(name, "dsps"))
		return SMEM_DSPS;
	if (!strcmp(name, "lpass"))
		return SMEM_Q6;
	if (!strcmp(name, "mpss"))
		return SMEM_MODEM;
	if (!strcmp(name, "rpm"))
		return SMEM_RPM;
	if (!strcmp(name, "wcnss"))
		return SMEM_WCNSS;
	if (!strcmp(name, "spss"))
		return SMEM_SPSS;
	if (!strcmp(name, "cdsp"))
		return SMEM_CDSP;
	return -ENODEV;
}

static void glink_set_affinity(struct edge_info *einfo, u32 *arr, size_t size)
{
	struct cpumask cpumask;
	pid_t pid;
	int i;

	cpumask_clear(&cpumask);
	for (i = 0; i < size; i++) {
		if (arr[i] < num_possible_cpus())
			cpumask_set_cpu(arr[i], &cpumask);
	}
	if (irq_set_affinity(einfo->irq_line, &cpumask))
		pr_err("%s: Failed to set irq affinity\n", __func__);
	if (sched_setaffinity(einfo->task->pid, &cpumask))
		pr_err("%s: Failed to set rx cpu affinity\n", __func__);
	pid = einfo->xprt_cfg.tx_task->pid;
	if (sched_setaffinity(pid, &cpumask))
		pr_err("%s: Failed to set tx cpu affinity\n", __func__);
}

static int glink_smem_native_probe(struct platform_device *pdev)
{
	struct device_node *node;
	struct device_node *phandle_node;
	struct edge_info *einfo;
	int rc, cpu_size;
	char *key;
	const char *subsys_name;
	uint32_t irq_line;
	uint32_t irq_mask;
	struct resource *r;
	u32 *cpu_array;
	char log_name[GLINK_NAME_SIZE * 2 + 7] = {0};

	node = pdev->dev.of_node;

	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
	if (!einfo) {
		rc = -ENOMEM;
		goto edge_info_alloc_fail;
	}

	key = "label";
	subsys_name = of_get_property(node, key, NULL);
	if (!subsys_name) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "interrupts";
	irq_line = irq_of_parse_and_map(node, 0);
	if (!irq_line) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "qcom,irq-mask";
	rc = of_property_read_u32(node, key, &irq_mask);
	if (rc) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "irq-reg-base";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	if (subsys_name_to_id(subsys_name) == -ENODEV) {
		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
		rc = -ENODEV;
		goto invalid_key;
	}
	einfo->remote_proc_id = subsys_name_to_id(subsys_name);

	init_xprt_cfg(einfo, subsys_name);
	init_xprt_if(einfo);
	spin_lock_init(&einfo->write_lock);
	init_waitqueue_head(&einfo->tx_blocked_queue);
	kthread_init_work(&einfo->kwork, rx_worker);
	kthread_init_worker(&einfo->kworker);
	einfo->read_from_fifo = read_from_fifo;
	einfo->write_to_fifo = write_to_fifo;
	init_srcu_struct(&einfo->use_ref);
	spin_lock_init(&einfo->rx_lock);
	INIT_LIST_HEAD(&einfo->deferred_cmds);
	spin_lock_init(&einfo->rt_vote_lock);
	einfo->rt_votes = 0;

	mutex_lock(&probe_lock);
	if (edge_infos[einfo->remote_proc_id]) {
		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
		       subsys_name);
		rc = -ENODEV;
		mutex_unlock(&probe_lock);
		goto invalid_key;
	}
	edge_infos[einfo->remote_proc_id] = einfo;
	mutex_unlock(&probe_lock);

	einfo->out_irq_mask = irq_mask;
	einfo->out_irq_reg = ioremap_nocache(r->start, resource_size(r));
	if (!einfo->out_irq_reg) {
		pr_err("%s: unable to map irq reg\n", __func__);
		rc = -ENOMEM;
		goto ioremap_fail;
	}

	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
				  "smem_native_%s", subsys_name);
	if (IS_ERR(einfo->task)) {
		rc = PTR_ERR(einfo->task);
		pr_err("%s: kthread_run failed %d\n", __func__, rc);
		goto kthread_fail;
	}

	einfo->tx_ch_desc = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
				       SMEM_CH_DESC_SIZE,
				       einfo->remote_proc_id,
				       0);
	if (PTR_ERR(einfo->tx_ch_desc) == -EPROBE_DEFER) {
		rc = -EPROBE_DEFER;
		goto smem_alloc_fail;
	}
	if (!einfo->tx_ch_desc) {
		pr_err("%s: smem alloc of ch descriptor failed\n", __func__);
		rc = -ENOMEM;
		goto smem_alloc_fail;
	}
	einfo->rx_ch_desc = einfo->tx_ch_desc + 1;

	einfo->tx_fifo_size = SZ_16K;
	einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
				    einfo->tx_fifo_size,
				    einfo->remote_proc_id,
				    0);
	if (!einfo->tx_fifo) {
		pr_err("%s: smem alloc of tx fifo failed\n", __func__);
		rc = -ENOMEM;
		goto smem_alloc_fail;
	}

	key = "qcom,qos-config";
	phandle_node = of_parse_phandle(node, key, 0);
	if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
							&einfo->xprt_cfg)))
		parse_qos_dt_params(node, einfo);

	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
	if (rc == -EPROBE_DEFER)
		goto reg_xprt_fail;
	if (rc) {
		pr_err("%s: glink core register transport failed: %d\n",
		       __func__, rc);
		goto reg_xprt_fail;
	}

	einfo->irq_line = irq_line;
	rc = request_irq(irq_line, irq_handler,
			 IRQF_TRIGGER_RISING | IRQF_SHARED,
			 node->name, einfo);
	if (rc < 0) {
		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
		       rc);
		goto request_irq_fail;
	}
	einfo->in_ssr = true;
	rc = enable_irq_wake(irq_line);
	if (rc < 0)
		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
		       irq_line);

	key = "cpu-affinity";
	cpu_size = of_property_count_u32_elems(node, key);
	if (cpu_size > 0) {
		cpu_array = kmalloc_array(cpu_size, sizeof(u32), GFP_KERNEL);
		if (!cpu_array) {
			rc = -ENOMEM;
			goto request_irq_fail;
		}
		rc = of_property_read_u32_array(node, key, cpu_array, cpu_size);
		if (!rc)
			glink_set_affinity(einfo, cpu_array, cpu_size);
		kfree(cpu_array);
	}

	einfo->debug_mask = QCOM_GLINK_DEBUG_ENABLE;
	snprintf(log_name, sizeof(log_name), "%s_%s_xprt",
		 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
	if (einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE)
		einfo->log_ctx =
			ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
	if (!einfo->log_ctx)
		GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
			  __func__, einfo->xprt_cfg.edge,
			  einfo->xprt_cfg.name);

	register_debugfs_info(einfo);
	/* fake an interrupt on this edge to see if the remote side is up */
	irq_handler(0, einfo);
	return 0;

request_irq_fail:
	glink_core_unregister_transport(&einfo->xprt_if);
reg_xprt_fail:
smem_alloc_fail:
	kthread_flush_worker(&einfo->kworker);
	kthread_stop(einfo->task);
	einfo->task = NULL;
kthread_fail:
	iounmap(einfo->out_irq_reg);
ioremap_fail:
	mutex_lock(&probe_lock);
	edge_infos[einfo->remote_proc_id] = NULL;
	mutex_unlock(&probe_lock);
invalid_key:
missing_key:
	kfree(einfo);
edge_info_alloc_fail:
	return rc;
}

static int glink_rpm_native_probe(struct platform_device *pdev)
{
	struct device_node *node;
	struct edge_info *einfo;
	int rc;
	char *key;
	const char *subsys_name;
	uint32_t irq_line;
	uint32_t irq_mask;
	struct resource *irq_r;
	struct resource *msgram_r;
	void __iomem *msgram;
	char toc[RPM_TOC_SIZE];
	uint32_t *tocp;
	uint32_t num_toc_entries;
	char log_name[GLINK_NAME_SIZE * 2 + 7] = {0};

	node = pdev->dev.of_node;

	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
	if (!einfo) {
		rc = -ENOMEM;
		goto edge_info_alloc_fail;
	}

	subsys_name = "rpm";

	key = "interrupts";
	irq_line = irq_of_parse_and_map(node, 0);
	if (!irq_line) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "qcom,irq-mask";
	rc = of_property_read_u32(node, key, &irq_mask);
	if (rc) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "irq-reg-base";
	irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!irq_r) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "msgram";
	msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!msgram_r) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	if (subsys_name_to_id(subsys_name) == -ENODEV) {
		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
		rc = -ENODEV;
		goto invalid_key;
	}
	einfo->remote_proc_id = subsys_name_to_id(subsys_name);

	init_xprt_cfg(einfo, subsys_name);
	init_xprt_if(einfo);
	spin_lock_init(&einfo->write_lock);
	init_waitqueue_head(&einfo->tx_blocked_queue);
	kthread_init_work(&einfo->kwork, rx_worker);
	kthread_init_worker(&einfo->kworker);
	einfo->intentless = true;
	einfo->read_from_fifo = memcpy32_fromio;
	einfo->write_to_fifo = memcpy32_toio;
	init_srcu_struct(&einfo->use_ref);
	spin_lock_init(&einfo->rx_lock);
	INIT_LIST_HEAD(&einfo->deferred_cmds);

	mutex_lock(&probe_lock);
	if (edge_infos[einfo->remote_proc_id]) {
		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
		       subsys_name);
		rc = -ENODEV;
		mutex_unlock(&probe_lock);
		goto invalid_key;
	}
	edge_infos[einfo->remote_proc_id] = einfo;
	mutex_unlock(&probe_lock);

	einfo->out_irq_mask = irq_mask;
	einfo->out_irq_reg = ioremap_nocache(irq_r->start,
					     resource_size(irq_r));
	if (!einfo->out_irq_reg) {
		pr_err("%s: unable to map irq reg\n", __func__);
		rc = -ENOMEM;
		goto irq_ioremap_fail;
	}

	msgram = ioremap_nocache(msgram_r->start, resource_size(msgram_r));
	if (!msgram) {
		pr_err("%s: unable to map msgram\n", __func__);
		rc = -ENOMEM;
		goto msgram_ioremap_fail;
	}

	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
				  "smem_native_%s", subsys_name);
	if (IS_ERR(einfo->task)) {
		rc = PTR_ERR(einfo->task);
		pr_err("%s: kthread_run failed %d\n", __func__, rc);
		goto kthread_fail;
	}

	memcpy32_fromio(toc, msgram + resource_size(msgram_r) - RPM_TOC_SIZE,
			RPM_TOC_SIZE);
	tocp = (uint32_t *)toc;
	if (*tocp != RPM_TOC_ID) {
		rc = -ENODEV;
		pr_err("%s: TOC id %d is not valid\n", __func__, *tocp);
		goto toc_init_fail;
	}
	++tocp;
	num_toc_entries = *tocp;
	if (num_toc_entries > RPM_MAX_TOC_ENTRIES) {
		rc = -ENODEV;
		pr_err("%s: %d is too many toc entries\n", __func__,
		       num_toc_entries);
		goto toc_init_fail;
	}
	++tocp;

	for (rc = 0; rc < num_toc_entries; ++rc) {
		if (*tocp != RPM_TX_FIFO_ID) {
			tocp += 3;
			continue;
		}
		++tocp;
		einfo->tx_ch_desc = msgram + *tocp;
		einfo->tx_fifo = einfo->tx_ch_desc + 1;
		if ((uintptr_t)einfo->tx_fifo >
		    (uintptr_t)(msgram + resource_size(msgram_r))) {
			pr_err("%s: invalid tx fifo address\n", __func__);
			einfo->tx_fifo = NULL;
			break;
		}
		++tocp;
		einfo->tx_fifo_size = *tocp;
		if (einfo->tx_fifo_size > resource_size(msgram_r) ||
		    (uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) >
		    (uintptr_t)(msgram + resource_size(msgram_r))) {
			pr_err("%s: invalid tx fifo size\n", __func__);
			einfo->tx_fifo = NULL;
			break;
		}
		break;
	}
	if (!einfo->tx_fifo) {
		rc = -ENODEV;
		pr_err("%s: tx fifo not found\n", __func__);
		goto toc_init_fail;
	}

	tocp = (uint32_t *)toc;
	tocp += 2;
	for (rc = 0; rc < num_toc_entries; ++rc) {
		if (*tocp != RPM_RX_FIFO_ID) {
			tocp += 3;
			continue;
		}
		++tocp;
		einfo->rx_ch_desc = msgram + *tocp;
		einfo->rx_fifo = einfo->rx_ch_desc + 1;
		if ((uintptr_t)einfo->rx_fifo >
		    (uintptr_t)(msgram + resource_size(msgram_r))) {
			pr_err("%s: invalid rx fifo address\n", __func__);
			einfo->rx_fifo = NULL;
			break;
		}
		++tocp;
		einfo->rx_fifo_size = *tocp;
		if (einfo->rx_fifo_size > resource_size(msgram_r) ||
		    (uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) >
		    (uintptr_t)(msgram + resource_size(msgram_r))) {
			pr_err("%s: invalid rx fifo size\n", __func__);
			einfo->rx_fifo = NULL;
			break;
		}
		break;
	}
	if (!einfo->rx_fifo) {
		rc = -ENODEV;
		pr_err("%s: rx fifo not found\n", __func__);
		goto toc_init_fail;
	}

	einfo->tx_ch_desc->write_index = 0;
	einfo->rx_ch_desc->read_index = 0;

	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
	if (rc == -EPROBE_DEFER)
		goto reg_xprt_fail;
	if (rc) {
		pr_err("%s: glink core register transport failed: %d\n",
		       __func__, rc);
		goto reg_xprt_fail;
	}

	einfo->irq_line = irq_line;
	rc = request_irq(irq_line, irq_handler,
			 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
			 node->name, einfo);
	if (rc < 0) {
		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
		       rc);
		goto request_irq_fail;
	}
	rc = enable_irq_wake(irq_line);
	if (rc < 0)
		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
		       irq_line);

	einfo->debug_mask = QCOM_GLINK_DEBUG_DISABLE;
	snprintf(log_name, sizeof(log_name), "%s_%s_xprt",
		 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
	if (einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE)
		einfo->log_ctx =
			ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
	if (!einfo->log_ctx)
		GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
			  __func__, einfo->xprt_cfg.edge,
			  einfo->xprt_cfg.name);

	register_debugfs_info(einfo);
	einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
	return 0;

request_irq_fail:
	glink_core_unregister_transport(&einfo->xprt_if);
reg_xprt_fail:
toc_init_fail:
	kthread_flush_worker(&einfo->kworker);
	kthread_stop(einfo->task);
	einfo->task = NULL;
kthread_fail:
	iounmap(msgram);
msgram_ioremap_fail:
	iounmap(einfo->out_irq_reg);
irq_ioremap_fail:
	mutex_lock(&probe_lock);
	edge_infos[einfo->remote_proc_id] = NULL;
	mutex_unlock(&probe_lock);
invalid_key:
missing_key:
	kfree(einfo);
edge_info_alloc_fail:
	return rc;
}

static int glink_mailbox_probe(struct platform_device *pdev)
{
	struct device_node *node;
	struct edge_info *einfo;
	int rc;
	char *key;
	const char *subsys_name;
	uint32_t irq_line;
	uint32_t irq_mask;
	struct resource *irq_r;
	struct resource *mbox_loc_r;
	struct resource *mbox_size_r;
	struct resource *rx_reset_r;
	void *mbox_loc;
	void *mbox_size;
	struct mailbox_config_info *mbox_cfg;
	uint32_t mbox_cfg_size;
	phys_addr_t cfg_p_addr;
	char log_name[GLINK_NAME_SIZE * 2 + 7] = {0};

	node = pdev->dev.of_node;

	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
	if (!einfo) {
		rc = -ENOMEM;
		goto edge_info_alloc_fail;
	}

	key = "label";
	subsys_name = of_get_property(node, key, NULL);
	if (!subsys_name) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "interrupts";
	irq_line = irq_of_parse_and_map(node, 0);
	if (!irq_line) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "qcom,irq-mask";
	rc = of_property_read_u32(node, key, &irq_mask);
	if (rc) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "irq-reg-base";
	irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!irq_r) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "mbox-loc-addr";
	mbox_loc_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!mbox_loc_r) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "mbox-loc-size";
	mbox_size_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!mbox_size_r) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "irq-rx-reset";
	rx_reset_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!rx_reset_r) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "qcom,tx-ring-size";
	rc = of_property_read_u32(node, key, &einfo->tx_fifo_size);
	if (rc) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "qcom,rx-ring-size";
	rc = of_property_read_u32(node, key, &einfo->rx_fifo_size);
	if (rc) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	if (subsys_name_to_id(subsys_name) == -ENODEV) {
		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
		rc = -ENODEV;
		goto invalid_key;
	}
	einfo->remote_proc_id = subsys_name_to_id(subsys_name);

	init_xprt_cfg(einfo, subsys_name);
	einfo->xprt_cfg.name = "mailbox";
	init_xprt_if(einfo);
	spin_lock_init(&einfo->write_lock);
	init_waitqueue_head(&einfo->tx_blocked_queue);
	kthread_init_work(&einfo->kwork, rx_worker);
	kthread_init_worker(&einfo->kworker);
	einfo->read_from_fifo = read_from_fifo;
	einfo->write_to_fifo = write_to_fifo;
	init_srcu_struct(&einfo->use_ref);
	spin_lock_init(&einfo->rx_lock);
	INIT_LIST_HEAD(&einfo->deferred_cmds);

	mutex_lock(&probe_lock);
	if (edge_infos[einfo->remote_proc_id]) {
		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
		       subsys_name);
		rc = -ENODEV;
		mutex_unlock(&probe_lock);
		goto invalid_key;
	}
	edge_infos[einfo->remote_proc_id] = einfo;
	mutex_unlock(&probe_lock);

	einfo->out_irq_mask = irq_mask;
	einfo->out_irq_reg = ioremap_nocache(irq_r->start,
					     resource_size(irq_r));
	if (!einfo->out_irq_reg) {
		pr_err("%s: unable to map irq reg\n", __func__);
		rc = -ENOMEM;
		goto irq_ioremap_fail;
	}

	mbox_loc = ioremap_nocache(mbox_loc_r->start,
				   resource_size(mbox_loc_r));
	if (!mbox_loc) {
		pr_err("%s: unable to map mailbox location reg\n", __func__);
		rc = -ENOMEM;
		goto mbox_loc_ioremap_fail;
	}

	mbox_size = ioremap_nocache(mbox_size_r->start,
				    resource_size(mbox_size_r));
	if (!mbox_size) {
		pr_err("%s: unable to map mailbox size reg\n", __func__);
		rc = -ENOMEM;
		goto mbox_size_ioremap_fail;
	}

	einfo->rx_reset_reg = ioremap_nocache(rx_reset_r->start,
					      resource_size(rx_reset_r));
	if (!einfo->rx_reset_reg) {
		pr_err("%s: unable to map rx reset reg\n", __func__);
		rc = -ENOMEM;
		goto rx_reset_ioremap_fail;
	}

	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
				  "smem_native_%s", subsys_name);
	if (IS_ERR(einfo->task)) {
		rc = PTR_ERR(einfo->task);
		pr_err("%s: kthread_run failed %d\n", __func__, rc);
		goto kthread_fail;
	}

	mbox_cfg_size = sizeof(*mbox_cfg) + einfo->tx_fifo_size +
			einfo->rx_fifo_size;
	mbox_cfg = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
			      mbox_cfg_size,
			      einfo->remote_proc_id,
			      0);
	if (PTR_ERR(mbox_cfg) == -EPROBE_DEFER) {
		rc = -EPROBE_DEFER;
		goto smem_alloc_fail;
	}
	if (!mbox_cfg) {
		pr_err("%s: smem alloc of mailbox struct failed\n", __func__);
		rc = -ENOMEM;
		goto smem_alloc_fail;
	}
	einfo->mailbox = mbox_cfg;
	einfo->tx_ch_desc = (struct channel_desc *)(&mbox_cfg->tx_read_index);
	einfo->rx_ch_desc = (struct channel_desc *)(&mbox_cfg->rx_read_index);
	mbox_cfg->tx_size = einfo->tx_fifo_size;
	mbox_cfg->rx_size = einfo->rx_fifo_size;
	einfo->tx_fifo = &mbox_cfg->fifo[0];

	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
	if (rc == -EPROBE_DEFER)
		goto reg_xprt_fail;
	if (rc) {
		pr_err("%s: glink core register transport failed: %d\n",
		       __func__, rc);
		goto reg_xprt_fail;
	}

	einfo->irq_line = irq_line;
	rc = request_irq(irq_line, irq_handler,
			 IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND | IRQF_SHARED,
			 node->name, einfo);
	if (rc < 0) {
		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
		       rc);
		goto request_irq_fail;
	}
	rc = enable_irq_wake(irq_line);
	if (rc < 0)
		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
		       irq_line);

	einfo->debug_mask = QCOM_GLINK_DEBUG_DISABLE;
	snprintf(log_name, sizeof(log_name), "%s_%s_xprt",
		 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
	if (einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE)
		einfo->log_ctx =
			ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
	if (!einfo->log_ctx)
		GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
			  __func__, einfo->xprt_cfg.edge,
			  einfo->xprt_cfg.name);

	register_debugfs_info(einfo);

	writel_relaxed(mbox_cfg_size, mbox_size);
	cfg_p_addr = smem_virt_to_phys(mbox_cfg);
	writel_relaxed(lower_32_bits(cfg_p_addr), mbox_loc);
	writel_relaxed(upper_32_bits(cfg_p_addr), mbox_loc + 4);
	einfo->in_ssr = true;
	send_irq(einfo);
	iounmap(mbox_size);
	iounmap(mbox_loc);
	return 0;

request_irq_fail:
	glink_core_unregister_transport(&einfo->xprt_if);
reg_xprt_fail:
smem_alloc_fail:
	kthread_flush_worker(&einfo->kworker);
	kthread_stop(einfo->task);
	einfo->task = NULL;
kthread_fail:
	iounmap(einfo->rx_reset_reg);
rx_reset_ioremap_fail:
	iounmap(mbox_size);
mbox_size_ioremap_fail:
	iounmap(mbox_loc);
mbox_loc_ioremap_fail:
	iounmap(einfo->out_irq_reg);
irq_ioremap_fail:
	mutex_lock(&probe_lock);
	edge_infos[einfo->remote_proc_id] = NULL;
	mutex_unlock(&probe_lock);
invalid_key:
missing_key:
	kfree(einfo);
edge_info_alloc_fail:
	return rc;
}


#if defined(CONFIG_DEBUG_FS)
/**
 * debug_edge() - generates formatted text output displaying current edge state
 * @s:	File to send the output to.
 */
static void debug_edge(struct seq_file *s)
{
	struct edge_info *einfo;
	struct glink_dbgfs_data *dfs_d;

	dfs_d = s->private;
	einfo = dfs_d->priv_data;

	/*
	 * formatted, human readable edge state output, ie:
	 * TX/RX fifo information:
	 * ID|EDGE      |TX READ   |TX WRITE  |TX SIZE   |RX READ   |RX WRITE  |RX SIZE
	 * -----------------------------------------------------------------------------
	 * 01|mpss      |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000
	 *
	 * Interrupt information:
	 * EDGE      |TX INT    |RX INT
	 * --------------------------------
	 * mpss      |0x00000006|0x00000008
	 */
	seq_puts(s, "TX/RX fifo information:\n");
	seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n",
								"ID",
								"EDGE",
								"TX READ",
								"TX WRITE",
								"TX SIZE",
								"RX READ",
								"RX WRITE",
								"RX SIZE");
	seq_puts(s,
		"-------------------------------------------------------------------------------\n");
	if (!einfo)
		return;

	seq_printf(s, "%02i|%-10s|", einfo->remote_proc_id,
						einfo->xprt_cfg.edge);
	if (!einfo->rx_fifo)
		seq_puts(s, "Link Not Up\n");
	else
		seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n",
						einfo->tx_ch_desc->read_index,
						einfo->tx_ch_desc->write_index,
						einfo->tx_fifo_size,
						einfo->rx_ch_desc->read_index,
						einfo->rx_ch_desc->write_index,
						einfo->rx_fifo_size);

	seq_puts(s, "\nInterrupt information:\n");
	seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT");
	seq_puts(s, "--------------------------------\n");
	seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge,
						einfo->tx_irq_count,
						einfo->rx_irq_count);
}

/**
 * register_debugfs_info() - initialize debugfs device entries
 * @einfo:	Pointer to specific edge_info for which register is called.
 */
static void register_debugfs_info(struct edge_info *einfo)
{
	struct glink_dbgfs dfs;
	char *curr_dir_name;
	int dir_name_len;
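
	/* "+ 2" covers the '_' separator and the terminating NUL. */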
	dir_name_len = strlen(einfo->xprt_cfg.edge) +
				strlen(einfo->xprt_cfg.name) + 2;
	curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL);
	if (!curr_dir_name) {
		GLINK_ERR("%s: Memory allocation failed\n", __func__);
		return;
	}

	snprintf(curr_dir_name, dir_name_len, "%s_%s",
			einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
	dfs.curr_name = curr_dir_name;
	dfs.par_name = "xprt";
	dfs.b_dir_create = false;
	glink_debugfs_create("XPRT_INFO", debug_edge,
				&dfs, einfo, false);
	kfree(curr_dir_name);
}
#else
static void register_debugfs_info(struct edge_info *einfo)
{
}
#endif /* CONFIG_DEBUG_FS */
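
/*
 * For reference, a device tree node binding against the SMEM native transport
 * below might look roughly like this (a sketch only: the register address,
 * interrupt specifier, qcom,irq-mask value, and label are illustrative
 * placeholders, not values from any real platform):
 *
 *	qcom,glink-smem-native-xprt-modem {
 *		compatible = "qcom,glink-smem-native-xprt";
 *		reg = <0x9820000 0x4>;
 *		reg-names = "irq-reg-base";
 *		qcom,irq-mask = <0x8000>;
 *		interrupts = <0 157 1>;
 *		label = "mpss";
 *	};
 */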

static const struct of_device_id smem_match_table[] = {
	{ .compatible = "qcom,glink-smem-native-xprt" },
	{},
};

static struct platform_driver glink_smem_native_driver = {
	.probe = glink_smem_native_probe,
	.driver = {
		.name = "msm_glink_smem_native_xprt",
		.owner = THIS_MODULE,
		.of_match_table = smem_match_table,
	},
};

static const struct of_device_id rpm_match_table[] = {
	{ .compatible = "qcom,glink-rpm-native-xprt" },
	{},
};

static struct platform_driver glink_rpm_native_driver = {
	.probe = glink_rpm_native_probe,
	.driver = {
		.name = "msm_glink_rpm_native_xprt",
		.owner = THIS_MODULE,
		.of_match_table = rpm_match_table,
	},
};

static const struct of_device_id mailbox_match_table[] = {
	{ .compatible = "qcom,glink-mailbox-xprt" },
	{},
};

static struct platform_driver glink_mailbox_driver = {
	.probe = glink_mailbox_probe,
	.driver = {
		.name = "msm_glink_mailbox_xprt",
		.owner = THIS_MODULE,
		.of_match_table = mailbox_match_table,
	},
};
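
/*
 * All three transports register at arch_initcall time so they are available
 * before the G-Link core and dependent clients probe; devices that race
 * ahead of SMEM itself are handled via -EPROBE_DEFER in the probe paths.
 */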
static int __init glink_smem_native_xprt_init(void)
{
	int rc;

	rc = platform_driver_register(&glink_smem_native_driver);
	if (rc) {
		pr_err("%s: glink_smem_native_driver register failed %d\n",
								__func__, rc);
		return rc;
	}

	rc = platform_driver_register(&glink_rpm_native_driver);
	if (rc) {
		pr_err("%s: glink_rpm_native_driver register failed %d\n",
								__func__, rc);
		/* Unwind the earlier registration on partial failure. */
		platform_driver_unregister(&glink_smem_native_driver);
		return rc;
	}

	rc = platform_driver_register(&glink_mailbox_driver);
	if (rc) {
		pr_err("%s: glink_mailbox_driver register failed %d\n",
								__func__, rc);
		platform_driver_unregister(&glink_rpm_native_driver);
		platform_driver_unregister(&glink_smem_native_driver);
		return rc;
	}

	return 0;
}
arch_initcall(glink_smem_native_xprt_init);

MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport");
MODULE_LICENSE("GPL v2");