/* drivers/serial/msm_serial_hs.c — extraction artifact (viewer line-number gutter) removed */
/* drivers/serial/msm_serial_hs.c
 *
 * MSM 7k High speed uart driver
 *
 * Copyright (c) 2008 Google Inc.
 * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
 * Modified: Nick Pelly <[email protected]>
 *
 * All source code in this file is licensed under the following license
 * except where indicated.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * Has optional support for uart power management independent of linux
 * suspend/resume:
 *
 * RX wakeup.
 * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
 * UART RX pin). This should only be used if there is not a wakeup
 * GPIO on the UART CTS, and the first RX byte is known (for example, with the
 * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
 * always be lost. RTS will be asserted even while the UART is off in this mode
 * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
 */
  32. #include <linux/module.h>
  33. #include <linux/serial.h>
  34. #include <linux/serial_core.h>
  35. #include <linux/slab.h>
  36. #include <linux/init.h>
  37. #include <linux/interrupt.h>
  38. #include <linux/irq.h>
  39. #include <linux/io.h>
  40. #include <linux/ioport.h>
  41. #include <linux/atomic.h>
  42. #include <linux/kernel.h>
  43. #include <linux/timer.h>
  44. #include <linux/clk.h>
  45. #include <linux/delay.h>
  46. #include <linux/platform_device.h>
  47. #include <linux/pm_runtime.h>
  48. #include <linux/dma-mapping.h>
  49. #include <linux/tty_flip.h>
  50. #include <linux/wait.h>
  51. #include <linux/sysfs.h>
  52. #include <linux/stat.h>
  53. #include <linux/device.h>
  54. #include <linux/debugfs.h>
  55. #include <linux/of.h>
  56. #include <linux/of_device.h>
  57. #include <linux/of_gpio.h>
  58. #include <linux/gpio.h>
  59. #include <linux/ipc_logging.h>
  60. #include <asm/irq.h>
  61. #include <linux/kthread.h>
  62. #include <uapi/linux/sched.h>
  63. #include <linux/msm-sps.h>
  64. #include <linux/platform_data/msm_serial_hs.h>
  65. #include <linux/msm-bus.h>
  66. #include "msm_serial_hs_hwreg.h"
/* SPS (BAM) peripheral roles for the UART consumer/producer pipes */
#define UART_SPS_CONS_PERIPHERAL 0
#define UART_SPS_PROD_PERIPHERAL 1
/* Page counts for the per-port IPC log contexts (state/user/data) */
#define IPC_MSM_HS_LOG_STATE_PAGES 2
#define IPC_MSM_HS_LOG_USER_PAGES 2
#define IPC_MSM_HS_LOG_DATA_PAGES 3
/* Number of RX DMA descriptors tracked in msm_hs_rx.iovec */
#define UART_DMA_DESC_NR 8
/* Maximum bytes dumped per call by hex_dump_ipc() */
#define BUF_DUMP_SIZE 32
/* If the debug_mask gets set to FATAL_LEV,
 * a fatal error has happened and further IPC logging
 * is disabled so that this problem can be detected
 */
enum {
	FATAL_LEV = 0U,	/* logging disabled after a fatal error */
	ERR_LEV = 1U,	/* errors (also mirrored to the console) */
	WARN_LEV = 2U,	/* warnings (also mirrored to the console) */
	INFO_LEV = 3U,	/* informational, default level */
	DBG_LEV = 4U,	/* verbose debug */
};
/*
 * IPC logging helpers.  Each MSM_HS_* macro requires a local 'msm_uport'
 * in scope and logs to the port's main IPC context when the per-port
 * ipc_debug_mask allows that level.
 */
#define MSM_HS_DBG(x...) do { \
	if (msm_uport->ipc_debug_mask >= DBG_LEV) { \
		if (msm_uport->ipc_msm_hs_log_ctxt) \
			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
	} \
} while (0)

#define MSM_HS_INFO(x...) do { \
	if (msm_uport->ipc_debug_mask >= INFO_LEV) {\
		if (msm_uport->ipc_msm_hs_log_ctxt) \
			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
	} \
} while (0)

/* warnings and errors show up on console always */
#define MSM_HS_WARN(x...) do { \
	pr_warn(x); \
	if (msm_uport->ipc_msm_hs_log_ctxt && \
			msm_uport->ipc_debug_mask >= WARN_LEV) \
		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
} while (0)

/* ERROR condition in the driver sets the hs_serial_debug_mask
 * to ERR_FATAL level, so that this message can be seen
 * in IPC logging. Further errors continue to log on the console
 */
#define MSM_HS_ERR(x...) do { \
	pr_err(x); \
	if (msm_uport->ipc_msm_hs_log_ctxt && \
			msm_uport->ipc_debug_mask >= ERR_LEV) { \
		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
		msm_uport->ipc_debug_mask = FATAL_LEV; \
	} \
} while (0)

/* Log to an arbitrary IPC context (e.g. the power or user context) */
#define LOG_USR_MSG(ctx, x...) do { \
	if (ctx) \
		ipc_log_string(ctx, x); \
} while (0)
/*
 * There are 3 different kind of UART Core available on MSM.
 * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
 * and BSLP based HSUART.
 */
enum uart_core_type {
	LEGACY_HSUART,	/* original high speed UART core */
	GSBI_HSUART,	/* GSBI based HSUART core */
	BLSP_HSUART,	/* BAM/BLSP based HSUART core */
};
/* Reason a TX/RX flush was initiated.  Values at or beyond
 * FLUSH_DATA_INVALID mean the buffered data must not be delivered.
 */
enum flush_reason {
	FLUSH_NONE,		/* no flush in progress */
	FLUSH_DATA_READY,	/* flush with valid data to deliver */
	FLUSH_DATA_INVALID, /* values after this indicate invalid data */
	FLUSH_IGNORE,
	FLUSH_STOP,
	FLUSH_SHUTDOWN,
};
/*
 * SPS data structures to support HSUART with BAM
 * @sps_pipe - This struct defines BAM pipe descriptor
 * @sps_connect - This struct defines a connection's end point
 * @sps_register - This struct defines a event registration parameters
 */
struct msm_hs_sps_ep_conn_data {
	struct sps_pipe *pipe_handle;		/* BAM pipe handle */
	struct sps_connect config;		/* endpoint connection config */
	struct sps_register_event event;	/* registered event parameters */
};
/* TX-side state: BAM consumer pipe bookkeeping and the kthread worker
 * that services TX work.
 */
struct msm_hs_tx {
	bool dma_in_flight; /* tx dma in progress */
	enum flush_reason flush;	/* current TX flush state */
	wait_queue_head_t wait;		/* waiters for TX state changes */
	int tx_count;			/* byte count of current transfer */
	dma_addr_t dma_base;		/* DMA address of the TX buffer */
	struct kthread_work kwork;	/* TX work item */
	struct kthread_worker kworker;	/* worker running kwork */
	struct task_struct *task;	/* kthread backing the worker */
	struct msm_hs_sps_ep_conn_data cons;	/* BAM consumer pipe data */
	struct timer_list tx_timeout_timer;	/* watchdog for stalled TX */
	void *ipc_tx_ctxt;		/* IPC log context for TX data */
};
/* RX-side state: BAM producer pipe, DMA buffer, and the kthread worker
 * that delivers received data.
 */
struct msm_hs_rx {
	enum flush_reason flush;	/* current RX flush state */
	wait_queue_head_t wait;		/* waiters for RX state changes */
	dma_addr_t rbuffer;		/* DMA address of the RX buffer */
	unsigned char *buffer;		/* CPU address of the RX buffer */
	/* presumably enum buffer_states bits of data not yet delivered
	 * to the tty layer — confirm against flip_insert_work users
	 */
	unsigned int buffer_pending;
	struct delayed_work flip_insert_work;	/* retries tty insertion */
	struct kthread_work kwork;	/* RX work item (msm_serial_hs_rx_work) */
	struct kthread_worker kworker;	/* worker running kwork */
	struct task_struct *task;	/* kthread backing the worker */
	struct msm_hs_sps_ep_conn_data prod;	/* BAM producer pipe data */
	unsigned long queued_flag;	/* bitmask of queued descriptors */
	unsigned long pending_flag;	/* bitmask of completed descriptors */
	int rx_inx;			/* index of descriptor in service */
	struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
	void *ipc_rx_ctxt;		/* IPC log context for RX data */
};
/* Bit flags describing RX conditions left pending in
 * msm_hs_rx.buffer_pending.
 */
enum buffer_states {
	NONE_PENDING = 0x0,	/* nothing outstanding */
	FIFO_OVERRUN = 0x1,	/* overrun condition to report */
	PARITY_ERROR = 0x2,	/* parity error to report */
	CHARS_NORMAL = 0x4,	/* normal characters still to insert */
};
/* Runtime power-management state of the port */
enum msm_hs_pm_state {
	MSM_HS_PM_ACTIVE,	/* resources on, port usable */
	MSM_HS_PM_SUSPENDED,	/* runtime suspended */
	MSM_HS_PM_SYS_SUSPENDED,	/* system suspend entered */
};
/* optional low power wakeup, typically on a GPIO RX irq */
struct msm_hs_wakeup {
	int irq; /* < 0 indicates low power wakeup disabled */
	unsigned char ignore; /* bool */
	/* bool: inject char into rx tty on wakeup */
	bool inject_rx;
	unsigned char rx_to_inject;	/* character injected on wakeup */
	bool enabled;			/* wakeup irq currently enabled */
	bool freed;			/* wakeup irq has been freed */
};
/* Per-port driver state, embedding the serial core's uart_port */
struct msm_hs_port {
	struct uart_port uport;
	unsigned long imr_reg; /* shadow value of UARTDM_IMR */
	struct clk *clk;	/* core clock */
	struct clk *pclk;	/* interface clock; may be NULL */
	struct msm_hs_tx tx;
	struct msm_hs_rx rx;
	atomic_t resource_count;	/* outstanding runtime-PM votes */
	struct msm_hs_wakeup wakeup;
	struct dentry *loopback_dir;	/* debugfs directory */
	struct work_struct clock_off_w; /* work for actual clock off */
	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
	struct mutex mtx; /* resource access mutex */
	enum uart_core_type uart_type;
	unsigned long bam_handle;	/* SPS BAM device handle */
	resource_size_t bam_mem;	/* BAM register space (physical) */
	int bam_irq;
	unsigned char __iomem *bam_base; /* BAM register space (mapped) */
	unsigned int bam_tx_ep_pipe_index;
	unsigned int bam_rx_ep_pipe_index;
	/* struct sps_event_notify is an argument passed when triggering a
	 * callback event object registered for an SPS connection end point.
	 */
	struct sps_event_notify notify;
	/* bus client handler */
	u32 bus_perf_client;
	/* BLSP UART required BUS Scaling data */
	struct msm_bus_scale_pdata *bus_scale_table;
	bool rx_bam_inprogress;
	wait_queue_head_t bam_disconnect_wait;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *gpio_state_active;
	struct pinctrl_state *gpio_state_suspend;
	bool flow_control;
	enum msm_hs_pm_state pm_state;	/* runtime PM state */
	atomic_t client_count;
	bool obs; /* out of band sleep flag */
	atomic_t client_req_state;
	void *ipc_msm_hs_log_ctxt;	/* main IPC log context */
	void *ipc_msm_hs_pwr_ctxt;	/* power-event IPC log context */
	int ipc_debug_mask;	/* current level, FATAL_LEV..DBG_LEV */
};
/* Devicetree match table: BLSP based HSUART v1.4 */
static const struct of_device_id msm_hs_match_table[] = {
	{ .compatible = "qcom,msm-hsuart-v14"},
	{}
};
#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512
#define RETRY_TIMEOUT 5
#define UARTDM_NR 256
#define BAM_PIPE_MIN 0
#define BAM_PIPE_MAX 11
#define BUS_SCALING 1	/* bus vote: request bandwidth */
#define BUS_RESET 0	/* bus vote: release bandwidth */
#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
#define BLSP_UART_CLK_FMAX 63160000

static struct dentry *debug_base;	/* debugfs root for this driver */
static struct platform_driver msm_serial_hs_platform_driver;
static struct uart_driver msm_hs_driver;
static const struct uart_ops msm_hs_ops;

/* forward declarations */
static void msm_hs_start_rx_locked(struct uart_port *uport);
static void msm_serial_hs_rx_work(struct kthread_work *work);
static void flip_insert_work(struct work_struct *work);
static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport);
static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport);
static int msm_hs_pm_resume(struct device *dev);

/* Map an embedded uart_port back to its containing msm_hs_port */
#define UARTDM_TO_MSM(uart_port) \
	container_of((uart_port), struct msm_hs_port, uport)
  272. static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
  273. unsigned long arg)
  274. {
  275. int ret = 0, state = 1;
  276. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  277. if (!msm_uport)
  278. return -ENODEV;
  279. switch (cmd) {
  280. case MSM_ENABLE_UART_CLOCK: {
  281. ret = msm_hs_request_clock_on(&msm_uport->uport);
  282. break;
  283. }
  284. case MSM_DISABLE_UART_CLOCK: {
  285. ret = msm_hs_request_clock_off(&msm_uport->uport);
  286. break;
  287. }
  288. case MSM_GET_UART_CLOCK_STATUS: {
  289. /* Return value 0 - UART CLOCK is OFF
  290. * Return value 1 - UART CLOCK is ON
  291. */
  292. if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
  293. state = 0;
  294. ret = state;
  295. MSM_HS_INFO("%s():GET UART CLOCK STATUS: cmd=%d state=%d\n",
  296. __func__, cmd, state);
  297. break;
  298. }
  299. default: {
  300. MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
  301. cmd);
  302. ret = -ENOIOCTLCMD;
  303. break;
  304. }
  305. }
  306. return ret;
  307. }
  308. /*
  309. * This function is called initially during probe and then
  310. * through the runtime PM framework. The function directly calls
  311. * resource APIs to enable them.
  312. */
  313. static int msm_hs_clk_bus_vote(struct msm_hs_port *msm_uport)
  314. {
  315. int rc = 0;
  316. msm_hs_bus_voting(msm_uport, BUS_SCALING);
  317. /* Turn on core clk and iface clk */
  318. if (msm_uport->pclk) {
  319. rc = clk_prepare_enable(msm_uport->pclk);
  320. if (rc) {
  321. dev_err(msm_uport->uport.dev,
  322. "%s: Could not turn on pclk [%d]\n",
  323. __func__, rc);
  324. goto busreset;
  325. }
  326. }
  327. rc = clk_prepare_enable(msm_uport->clk);
  328. if (rc) {
  329. dev_err(msm_uport->uport.dev,
  330. "%s: Could not turn on core clk [%d]\n",
  331. __func__, rc);
  332. goto core_unprepare;
  333. }
  334. MSM_HS_DBG("%s: Clock ON successful\n", __func__);
  335. return rc;
  336. core_unprepare:
  337. clk_disable_unprepare(msm_uport->pclk);
  338. busreset:
  339. msm_hs_bus_voting(msm_uport, BUS_RESET);
  340. return rc;
  341. }
/*
 * This function is called initially during probe and then
 * through the runtime PM framework. The function directly calls
 * resource apis to disable them.
 */
static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
{
	/* Release clocks in reverse order of msm_hs_clk_bus_vote() */
	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);
	/* Drop the bus bandwidth vote last */
	msm_hs_bus_voting(msm_uport, BUS_RESET);
	MSM_HS_DBG("%s: Clock OFF successful\n", __func__);
}
  355. /* Remove vote for resources when done */
  356. static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
  357. {
  358. struct uart_port *uport = &(msm_uport->uport);
  359. int rc = atomic_read(&msm_uport->resource_count);
  360. MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
  361. if (rc <= 0) {
  362. MSM_HS_WARN("%s(): rc zero, bailing\n", __func__);
  363. WARN_ON(1);
  364. return;
  365. }
  366. atomic_dec(&msm_uport->resource_count);
  367. pm_runtime_mark_last_busy(uport->dev);
  368. pm_runtime_put_autosuspend(uport->dev);
  369. }
/* Vote for resources before accessing them */
static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
{
	int ret;
	struct uart_port *uport = &(msm_uport->uport);

	/* Wake the device through runtime PM; the resume callback is
	 * expected to bring clocks and bus votes up.
	 */
	ret = pm_runtime_get_sync(uport->dev);
	if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		/* Runtime PM did not run our resume callback (error or PM
		 * disabled) — invoke the resume path directly as fallback.
		 */
		MSM_HS_WARN("%s:%s runtime callback not invoked ret:%d st:%d",
			__func__, dev_name(uport->dev), ret,
			msm_uport->pm_state);
		msm_hs_pm_resume(uport->dev);
	}
	atomic_inc(&msm_uport->resource_count);
}
  384. /* Check if the uport line number matches with user id stored in pdata.
  385. * User id information is stored during initialization. This function
  386. * ensues that the same device is selected
  387. */
  388. static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
  389. {
  390. struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
  391. struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
  392. if ((!msm_uport) || (msm_uport->uport.line != pdev->id
  393. && msm_uport->uport.line != pdata->userid)) {
  394. pr_err("uport line number mismatch!");
  395. WARN_ON(1);
  396. return NULL;
  397. }
  398. return msm_uport;
  399. }
  400. static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
  401. char *buf)
  402. {
  403. int state = 1;
  404. ssize_t ret = 0;
  405. struct platform_device *pdev = container_of(dev, struct
  406. platform_device, dev);
  407. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  408. /* This check should not fail */
  409. if (msm_uport) {
  410. if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
  411. state = 0;
  412. ret = snprintf(buf, PAGE_SIZE, "%d\n", state);
  413. }
  414. return ret;
  415. }
  416. static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
  417. const char *buf, size_t count)
  418. {
  419. int state;
  420. ssize_t ret = 0;
  421. struct platform_device *pdev = container_of(dev, struct
  422. platform_device, dev);
  423. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  424. /* This check should not fail */
  425. if (msm_uport) {
  426. state = buf[0] - '0';
  427. switch (state) {
  428. case 0:
  429. MSM_HS_DBG("%s: Request clock OFF\n", __func__);
  430. msm_hs_request_clock_off(&msm_uport->uport);
  431. ret = count;
  432. break;
  433. case 1:
  434. MSM_HS_DBG("%s: Request clock ON\n", __func__);
  435. msm_hs_request_clock_on(&msm_uport->uport);
  436. ret = count;
  437. break;
  438. default:
  439. ret = -EINVAL;
  440. }
  441. }
  442. return ret;
  443. }
/* sysfs "clock": read clock state; write 0/1 to request clock off/on */
static DEVICE_ATTR(clock, 0644, show_clock, set_clock);
  445. static ssize_t show_debug_mask(struct device *dev,
  446. struct device_attribute *attr, char *buf)
  447. {
  448. ssize_t ret = 0;
  449. struct platform_device *pdev = container_of(dev, struct
  450. platform_device, dev);
  451. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  452. /* This check should not fail */
  453. if (msm_uport)
  454. ret = snprintf(buf, sizeof(int), "%u\n",
  455. msm_uport->ipc_debug_mask);
  456. return ret;
  457. }
  458. static ssize_t set_debug_mask(struct device *dev,
  459. struct device_attribute *attr,
  460. const char *buf, size_t count)
  461. {
  462. struct platform_device *pdev = container_of(dev, struct
  463. platform_device, dev);
  464. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  465. /* This check should not fail */
  466. if (msm_uport) {
  467. msm_uport->ipc_debug_mask = buf[0] - '0';
  468. if (msm_uport->ipc_debug_mask < FATAL_LEV ||
  469. msm_uport->ipc_debug_mask > DBG_LEV) {
  470. /* set to default level */
  471. msm_uport->ipc_debug_mask = INFO_LEV;
  472. MSM_HS_ERR("Range is 0 to 4;Set to default level 3\n");
  473. return -EINVAL;
  474. }
  475. }
  476. return count;
  477. }
/* sysfs "debug_mask": read/write the per-port IPC log verbosity (0-4) */
static DEVICE_ATTR(debug_mask, 0644, show_debug_mask,
		set_debug_mask);
/* True when a low-power RX wakeup interrupt is configured (irq > 0) */
static inline bool is_use_low_power_wakeup(struct msm_hs_port *msm_uport)
{
	return msm_uport->wakeup.irq > 0;
}
  484. static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
  485. {
  486. int ret;
  487. if (msm_uport->bus_perf_client) {
  488. MSM_HS_DBG("Bus voting:%d\n", vote);
  489. ret = msm_bus_scale_client_update_request(
  490. msm_uport->bus_perf_client, vote);
  491. if (ret)
  492. MSM_HS_ERR("%s(): Failed for Bus voting: %d\n",
  493. __func__, vote);
  494. }
  495. }
  496. static inline unsigned int msm_hs_read(struct uart_port *uport,
  497. unsigned int index)
  498. {
  499. return readl_relaxed(uport->membase + index);
  500. }
  501. static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
  502. unsigned int value)
  503. {
  504. writel_relaxed(value, uport->membase + index);
  505. }
  506. static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
  507. {
  508. struct sps_connect config;
  509. int ret;
  510. ret = sps_get_config(sps_pipe_handler, &config);
  511. if (ret) {
  512. pr_err("%s: sps_get_config() failed ret %d\n", __func__, ret);
  513. return ret;
  514. }
  515. config.options |= SPS_O_POLL;
  516. ret = sps_set_config(sps_pipe_handler, &config);
  517. if (ret) {
  518. pr_err("%s: sps_set_config() failed ret %d\n", __func__, ret);
  519. return ret;
  520. }
  521. return sps_disconnect(sps_pipe_handler);
  522. }
  523. static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
  524. char *prefix, char *string, u64 addr, int size)
  525. {
  526. char buf[(BUF_DUMP_SIZE * 3) + 2];
  527. int len = 0;
  528. len = min(size, BUF_DUMP_SIZE);
  529. /*
  530. * Print upto 32 data bytes, 32 bytes per line, 1 byte at a time and
  531. * don't include the ASCII text at the end of the buffer.
  532. */
  533. hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
  534. ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
  535. (unsigned int)addr, size, buf);
  536. }
/*
 * Read and log the UART core register state for debugging.
 * Must only run while the port clocks are on; reading the registers
 * with clocks off would fault, so the pm_state check below bails out
 * early instead.
 */
static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);

	/* Clocks gated: register access is unsafe, log and return */
	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_INFO("%s:Failed clocks are off, resource_count %d",
			__func__, atomic_read(&msm_uport->resource_count));
		return;
	}

	/* Mode, watermark, DMA-enable and interrupt registers */
	MSM_HS_DBG(
	"MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
	msm_hs_read(uport, UART_DM_MR1),
	msm_hs_read(uport, UART_DM_MR2),
	msm_hs_read(uport, UART_DM_TFWR),
	msm_hs_read(uport, UART_DM_RFWR),
	msm_hs_read(uport, UART_DM_DMEN),
	msm_hs_read(uport, UART_DM_IMR),
	msm_hs_read(uport, UART_DM_MISR),
	msm_hs_read(uport, UART_DM_NCF_TX));
	/* Status, RX DMA and FIFO-state registers */
	MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
	msm_hs_read(uport, UART_DM_SR),
	msm_hs_read(uport, UART_DM_ISR),
	msm_hs_read(uport, UART_DM_DMRX),
	msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
	msm_hs_read(uport, UART_DM_TXFS),
	msm_hs_read(uport, UART_DM_RXFS));
	/* Software-side RX flush state for correlation with the HW dump */
	MSM_HS_DBG("rx.flush:%u\n", msm_uport->rx.flush);
}
  567. static int msm_serial_loopback_enable_set(void *data, u64 val)
  568. {
  569. struct msm_hs_port *msm_uport = data;
  570. struct uart_port *uport = &(msm_uport->uport);
  571. unsigned long flags;
  572. int ret = 0;
  573. msm_hs_resource_vote(msm_uport);
  574. if (val) {
  575. spin_lock_irqsave(&uport->lock, flags);
  576. ret = msm_hs_read(uport, UART_DM_MR2);
  577. ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
  578. UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
  579. msm_hs_write(uport, UART_DM_MR2, ret);
  580. spin_unlock_irqrestore(&uport->lock, flags);
  581. } else {
  582. spin_lock_irqsave(&uport->lock, flags);
  583. ret = msm_hs_read(uport, UART_DM_MR2);
  584. ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
  585. UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
  586. msm_hs_write(uport, UART_DM_MR2, ret);
  587. spin_unlock_irqrestore(&uport->lock, flags);
  588. }
  589. /* Calling CLOCK API. Hence mb() requires here. */
  590. mb();
  591. msm_hs_resource_unvote(msm_uport);
  592. return 0;
  593. }
  594. static int msm_serial_loopback_enable_get(void *data, u64 *val)
  595. {
  596. struct msm_hs_port *msm_uport = data;
  597. struct uart_port *uport = &(msm_uport->uport);
  598. unsigned long flags;
  599. int ret = 0;
  600. msm_hs_resource_vote(msm_uport);
  601. spin_lock_irqsave(&uport->lock, flags);
  602. ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
  603. spin_unlock_irqrestore(&uport->lock, flags);
  604. msm_hs_resource_unvote(msm_uport);
  605. *val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
  606. return 0;
  607. }
/* debugfs file ops for the loopback node: u64 get/set, formatted as "%llu\n" */
DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
			msm_serial_loopback_enable_set, "%llu\n");
  610. /*
  611. * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
  612. * writing 1 turns on internal loopback mode in HW. Useful for automation
  613. * test scripts.
  614. * writing 0 disables the internal loopback mode. Default is disabled.
  615. */
  616. static void msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
  617. int id)
  618. {
  619. char node_name[15];
  620. snprintf(node_name, sizeof(node_name), "loopback.%d", id);
  621. msm_uport->loopback_dir = debugfs_create_file(node_name,
  622. 0644,
  623. debug_base,
  624. msm_uport,
  625. &loopback_enable_fops);
  626. if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
  627. MSM_HS_ERR("%s(): Cannot create loopback.%d debug entry",
  628. __func__, id);
  629. }
/*
 * Platform-device remove: tear down sysfs/debugfs nodes, free the
 * DMA-coherent RX ring, destroy the workqueue, unregister the uart port
 * and release clocks/mappings. Returns 0 on success, -EINVAL when the
 * device id is out of range or no matching port exists.
 * NOTE(review): teardown order (sysfs -> DMA -> workqueue -> port ->
 * clocks -> iounmap) is deliberate; do not reorder.
 */
static int msm_hs_remove(struct platform_device *pdev)
{
	struct msm_hs_port *msm_uport;
	struct device *dev;

	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
		pr_err("Invalid plaform device ID = %d\n", pdev->id);
		return -EINVAL;
	}

	msm_uport = get_matching_hs_port(pdev);
	if (!msm_uport)
		return -EINVAL;

	dev = msm_uport->uport.dev;
	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_debug_mask.attr);
	debugfs_remove(msm_uport->loopback_dir);

	/* Free the RX descriptor ring (UART_DMA_DESC_NR buffers) */
	dma_free_coherent(msm_uport->uport.dev,
			  UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
			  msm_uport->rx.buffer, msm_uport->rx.rbuffer);
	msm_uport->rx.buffer = NULL;
	msm_uport->rx.rbuffer = 0;

	destroy_workqueue(msm_uport->hsuart_wq);
	mutex_destroy(&msm_uport->mtx);

	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
	clk_put(msm_uport->clk);
	if (msm_uport->pclk)
		clk_put(msm_uport->pclk);
	iounmap(msm_uport->uport.membase);

	return 0;
}
  659. /* Connect a UART peripheral's SPS endpoint(consumer endpoint)
  660. *
  661. * Also registers a SPS callback function for the consumer
  662. * process with the SPS driver
  663. *
  664. * @uport - Pointer to uart uport structure
  665. *
  666. * @return - 0 if successful else negative value.
  667. *
  668. */
  669. static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
  670. {
  671. int ret;
  672. struct uart_port *uport = &msm_uport->uport;
  673. struct msm_hs_tx *tx = &msm_uport->tx;
  674. struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
  675. struct sps_connect *sps_config = &tx->cons.config;
  676. struct sps_register_event *sps_event = &tx->cons.event;
  677. unsigned long flags;
  678. unsigned int data;
  679. if (tx->flush != FLUSH_SHUTDOWN) {
  680. MSM_HS_ERR("%s:Invalid flush state:%d\n", __func__, tx->flush);
  681. return 0;
  682. }
  683. /* Establish connection between peripheral and memory endpoint */
  684. ret = sps_connect(sps_pipe_handle, sps_config);
  685. if (ret) {
  686. MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
  687. "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
  688. return ret;
  689. }
  690. /* Register callback event for EOT (End of transfer) event. */
  691. ret = sps_register_event(sps_pipe_handle, sps_event);
  692. if (ret) {
  693. MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
  694. "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
  695. goto reg_event_err;
  696. }
  697. spin_lock_irqsave(&(msm_uport->uport.lock), flags);
  698. msm_uport->tx.flush = FLUSH_STOP;
  699. spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
  700. data = msm_hs_read(uport, UART_DM_DMEN);
  701. /* Enable UARTDM Tx BAM Interface */
  702. data |= UARTDM_TX_BAM_ENABLE_BMSK;
  703. msm_hs_write(uport, UART_DM_DMEN, data);
  704. msm_hs_write(uport, UART_DM_CR, RESET_TX);
  705. msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
  706. msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
  707. MSM_HS_DBG("%s(): TX Connect", __func__);
  708. return 0;
  709. reg_event_err:
  710. sps_disconnect(sps_pipe_handle);
  711. return ret;
  712. }
  713. /* Connect a UART peripheral's SPS endpoint(producer endpoint)
  714. *
  715. * Also registers a SPS callback function for the producer
  716. * process with the SPS driver
  717. *
  718. * @uport - Pointer to uart uport structure
  719. *
  720. * @return - 0 if successful else negative value.
  721. *
  722. */
  723. static int msm_hs_spsconnect_rx(struct uart_port *uport)
  724. {
  725. int ret;
  726. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  727. struct msm_hs_rx *rx = &msm_uport->rx;
  728. struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
  729. struct sps_connect *sps_config = &rx->prod.config;
  730. struct sps_register_event *sps_event = &rx->prod.event;
  731. unsigned long flags;
  732. /* Establish connection between peripheral and memory endpoint */
  733. ret = sps_connect(sps_pipe_handle, sps_config);
  734. if (ret) {
  735. MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
  736. "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
  737. return ret;
  738. }
  739. /* Register callback event for DESC_DONE event. */
  740. ret = sps_register_event(sps_pipe_handle, sps_event);
  741. if (ret) {
  742. MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
  743. "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
  744. goto reg_event_err;
  745. }
  746. spin_lock_irqsave(&uport->lock, flags);
  747. if (msm_uport->rx.pending_flag)
  748. MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
  749. __func__, msm_uport->rx.pending_flag);
  750. msm_uport->rx.queued_flag = 0;
  751. msm_uport->rx.pending_flag = 0;
  752. msm_uport->rx.rx_inx = 0;
  753. msm_uport->rx.flush = FLUSH_STOP;
  754. spin_unlock_irqrestore(&uport->lock, flags);
  755. MSM_HS_DBG("%s(): RX Connect\n", __func__);
  756. return 0;
  757. reg_event_err:
  758. sps_disconnect(sps_pipe_handle);
  759. return ret;
  760. }
  761. /*
  762. * programs the UARTDM_CSR register with correct bit rates
  763. *
  764. * Interrupts should be disabled before we are called, as
  765. * we modify Set Baud rate
  766. * Set receive stale interrupt level, dependent on Bit Rate
  767. * Goal is to have around 8 ms before indicate stale.
  768. * roundup (((Bit Rate * .008) / 10) + 1
  769. */
  770. static void msm_hs_set_bps_locked(struct uart_port *uport,
  771. unsigned int bps)
  772. {
  773. unsigned long rxstale;
  774. unsigned long data;
  775. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  776. switch (bps) {
  777. case 300:
  778. msm_hs_write(uport, UART_DM_CSR, 0x00);
  779. rxstale = 1;
  780. break;
  781. case 600:
  782. msm_hs_write(uport, UART_DM_CSR, 0x11);
  783. rxstale = 1;
  784. break;
  785. case 1200:
  786. msm_hs_write(uport, UART_DM_CSR, 0x22);
  787. rxstale = 1;
  788. break;
  789. case 2400:
  790. msm_hs_write(uport, UART_DM_CSR, 0x33);
  791. rxstale = 1;
  792. break;
  793. case 4800:
  794. msm_hs_write(uport, UART_DM_CSR, 0x44);
  795. rxstale = 1;
  796. break;
  797. case 9600:
  798. msm_hs_write(uport, UART_DM_CSR, 0x55);
  799. rxstale = 2;
  800. break;
  801. case 14400:
  802. msm_hs_write(uport, UART_DM_CSR, 0x66);
  803. rxstale = 3;
  804. break;
  805. case 19200:
  806. msm_hs_write(uport, UART_DM_CSR, 0x77);
  807. rxstale = 4;
  808. break;
  809. case 28800:
  810. msm_hs_write(uport, UART_DM_CSR, 0x88);
  811. rxstale = 6;
  812. break;
  813. case 38400:
  814. msm_hs_write(uport, UART_DM_CSR, 0x99);
  815. rxstale = 8;
  816. break;
  817. case 57600:
  818. msm_hs_write(uport, UART_DM_CSR, 0xaa);
  819. rxstale = 16;
  820. break;
  821. case 76800:
  822. msm_hs_write(uport, UART_DM_CSR, 0xbb);
  823. rxstale = 16;
  824. break;
  825. case 115200:
  826. msm_hs_write(uport, UART_DM_CSR, 0xcc);
  827. rxstale = 31;
  828. break;
  829. case 230400:
  830. msm_hs_write(uport, UART_DM_CSR, 0xee);
  831. rxstale = 31;
  832. break;
  833. case 460800:
  834. msm_hs_write(uport, UART_DM_CSR, 0xff);
  835. rxstale = 31;
  836. break;
  837. case 4000000:
  838. case 3686400:
  839. case 3200000:
  840. case 3500000:
  841. case 3000000:
  842. case 2500000:
  843. case 2000000:
  844. case 1500000:
  845. case 1152000:
  846. case 1000000:
  847. case 921600:
  848. msm_hs_write(uport, UART_DM_CSR, 0xff);
  849. rxstale = 31;
  850. break;
  851. default:
  852. msm_hs_write(uport, UART_DM_CSR, 0xff);
  853. /* default to 9600 */
  854. bps = 9600;
  855. rxstale = 2;
  856. break;
  857. }
  858. /*
  859. * uart baud rate depends on CSR and MND Values
  860. * we are updating CSR before and then calling
  861. * clk_set_rate which updates MND Values. Hence
  862. * dsb requires here.
  863. */
  864. mb();
  865. if (bps > 460800) {
  866. uport->uartclk = bps * 16;
  867. /* BLSP based UART supports maximum clock frequency
  868. * of 63.16 Mhz. With this (63.16 Mhz) clock frequency
  869. * UART can support baud rate of 3.94 Mbps which is
  870. * equivalent to 4 Mbps.
  871. * UART hardware is robust enough to handle this
  872. * deviation to achieve baud rate ~4 Mbps.
  873. */
  874. if (bps == 4000000)
  875. uport->uartclk = BLSP_UART_CLK_FMAX;
  876. } else {
  877. uport->uartclk = 7372800;
  878. }
  879. if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
  880. MSM_HS_WARN("Error setting clock rate on UART\n");
  881. WARN_ON(1);
  882. }
  883. data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
  884. data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
  885. msm_hs_write(uport, UART_DM_IPR, data);
  886. /*
  887. * It is suggested to do reset of transmitter and receiver after
  888. * changing any protocol configuration. Here Baud rate and stale
  889. * timeout are getting updated. Hence reset transmitter and receiver.
  890. */
  891. msm_hs_write(uport, UART_DM_CR, RESET_TX);
  892. msm_hs_write(uport, UART_DM_CR, RESET_RX);
  893. }
  894. static void msm_hs_set_std_bps_locked(struct uart_port *uport,
  895. unsigned int bps)
  896. {
  897. unsigned long rxstale;
  898. unsigned long data;
  899. switch (bps) {
  900. case 9600:
  901. msm_hs_write(uport, UART_DM_CSR, 0x99);
  902. rxstale = 2;
  903. break;
  904. case 14400:
  905. msm_hs_write(uport, UART_DM_CSR, 0xaa);
  906. rxstale = 3;
  907. break;
  908. case 19200:
  909. msm_hs_write(uport, UART_DM_CSR, 0xbb);
  910. rxstale = 4;
  911. break;
  912. case 28800:
  913. msm_hs_write(uport, UART_DM_CSR, 0xcc);
  914. rxstale = 6;
  915. break;
  916. case 38400:
  917. msm_hs_write(uport, UART_DM_CSR, 0xdd);
  918. rxstale = 8;
  919. break;
  920. case 57600:
  921. msm_hs_write(uport, UART_DM_CSR, 0xee);
  922. rxstale = 16;
  923. break;
  924. case 115200:
  925. msm_hs_write(uport, UART_DM_CSR, 0xff);
  926. rxstale = 31;
  927. break;
  928. default:
  929. msm_hs_write(uport, UART_DM_CSR, 0x99);
  930. /* default to 9600 */
  931. bps = 9600;
  932. rxstale = 2;
  933. break;
  934. }
  935. data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
  936. data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
  937. msm_hs_write(uport, UART_DM_IPR, data);
  938. }
  939. static void msm_hs_enable_flow_control(struct uart_port *uport, bool override)
  940. {
  941. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  942. unsigned int data;
  943. if (msm_uport->flow_control || override) {
  944. /* Enable RFR line */
  945. msm_hs_write(uport, UART_DM_CR, RFR_LOW);
  946. /* Enable auto RFR */
  947. data = msm_hs_read(uport, UART_DM_MR1);
  948. data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
  949. msm_hs_write(uport, UART_DM_MR1, data);
  950. /* Ensure register IO completion */
  951. mb();
  952. }
  953. }
  954. static void msm_hs_disable_flow_control(struct uart_port *uport, bool override)
  955. {
  956. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  957. unsigned int data;
  958. /*
  959. * Clear the Rx Ready Ctl bit - This ensures that
  960. * flow control lines stop the other side from sending
  961. * data while we change the parameters
  962. */
  963. if (msm_uport->flow_control || override) {
  964. data = msm_hs_read(uport, UART_DM_MR1);
  965. /* disable auto ready-for-receiving */
  966. data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
  967. msm_hs_write(uport, UART_DM_MR1, data);
  968. /* Disable RFR line */
  969. msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
  970. /* Ensure register IO completion */
  971. mb();
  972. }
  973. }
  974. /*
  975. * termios : new ktermios
  976. * oldtermios: old ktermios previous setting
  977. *
  978. * Configure the serial port
  979. */
static void msm_hs_set_termios(struct uart_port *uport,
			       struct ktermios *termios,
			       struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/**
	 * set_termios can be invoked from the framework when
	 * the clocks are off and the client has not had a chance
	 * to turn them on. Make sure that they are on
	 */
	msm_hs_resource_vote(msm_uport);
	mutex_lock(&msm_uport->mtx);

	/* Mask all UART interrupts and halt flow control while we reprogram */
	msm_hs_write(uport, UART_DM_IMR, 0);
	msm_hs_disable_flow_control(uport, true);

	/*
	 * Disable Rx channel of UARTDM
	 * DMA Rx Stall happens if enqueue and flush of Rx command happens
	 * concurrently. Hence before changing the baud rate/protocol
	 * configuration and sending flush command to ADM, disable the Rx
	 * channel of UARTDM.
	 * Note: should not reset the receiver here immediately as it is not
	 * suggested to do disable/reset or reset/disable at the same time.
	 */
	data = msm_hs_read(uport, UART_DM_DMEN);
	/* Disable UARTDM RX BAM Interface */
	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	msm_hs_write(uport, UART_DM_DMEN, data);

	/*
	 * Reset RX and TX.
	 * Resetting the RX enables it, therefore we must reset and disable.
	 */
	msm_hs_write(uport, UART_DM_CR, RESET_RX);
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
	msm_hs_write(uport, UART_DM_CR, RESET_TX);

	/*
	 * 300 is the minimum baud supported by the driver; the 200 passed
	 * below is a sentinel value that is remapped to 3.2 Mbps just after.
	 */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
	/* Temporary remapping 200 BAUD to 3.2 mbps */
	if (bps == 200)
		bps = 3200000;

	/* No readable core clock rate => use the standard divider table */
	uport->uartclk = clk_get_rate(msm_uport->clk);
	if (!uport->uartclk)
		msm_hs_set_std_bps_locked(uport, bps);
	else
		msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UART_DM_MR2);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (c_cflag & PARENB) {
		if (c_cflag & PARODD)
			data |= ODD_PARITY;
		else if (c_cflag & CMSPAR)
			data |= SPACE_PARITY;
		else
			data |= EVEN_PARITY;
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UART_DM_MR2, data);

	/* Framing/parity/break bits the core should ignore or report */
	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;
	uport->read_status_mask = (termios->c_cflag & CREAD);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	/* Enable UARTDM Rx BAM Interface */
	data = msm_hs_read(uport, UART_DM_DMEN);
	data |= UARTDM_RX_BAM_ENABLE_BMSK;
	msm_hs_write(uport, UART_DM_DMEN, data);
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
	/* Issue TX,RX BAM Start IFC command */
	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
	/* Ensure Register Writes Complete */
	mb();

	/* Configure HW flow control
	 * UART Core would see status of CTS line when it is sending data
	 * to remote uart to confirm that it can receive or not.
	 * UART Core would trigger RFR if it is not having any space with
	 * RX FIFO.
	 */
	/* Pulling RFR line high */
	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
	data = msm_hs_read(uport, UART_DM_MR1);
	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
		msm_uport->flow_control = true;
	}
	msm_hs_write(uport, UART_DM_MR1, data);
	MSM_HS_INFO("%s: Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);

	mutex_unlock(&msm_uport->mtx);
	msm_hs_resource_unvote(msm_uport);
}
  1100. /*
  1101. * Standard API, Transmitter
  1102. * Any character in the transmit shift register is sent
  1103. */
  1104. unsigned int msm_hs_tx_empty(struct uart_port *uport)
  1105. {
  1106. unsigned int data;
  1107. unsigned int isr;
  1108. unsigned int ret = 0;
  1109. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1110. msm_hs_resource_vote(msm_uport);
  1111. data = msm_hs_read(uport, UART_DM_SR);
  1112. isr = msm_hs_read(uport, UART_DM_ISR);
  1113. msm_hs_resource_unvote(msm_uport);
  1114. MSM_HS_INFO("%s(): SR:0x%x ISR:0x%x ", __func__, data, isr);
  1115. if (data & UARTDM_SR_TXEMT_BMSK) {
  1116. ret = TIOCSER_TEMT;
  1117. } else
  1118. /*
  1119. * Add an extra sleep here because sometimes the framework's
  1120. * delay (based on baud rate) isn't good enough.
  1121. * Note that this won't happen during every port close, only
  1122. * on select occassions when the userspace does back to back
  1123. * write() and close().
  1124. */
  1125. usleep_range(5000, 7000);
  1126. return ret;
  1127. }
  1128. EXPORT_SYMBOL(msm_hs_tx_empty);
  1129. /*
  1130. * Standard API, Stop transmitter.
  1131. * Any character in the transmit shift register is sent as
  1132. * well as the current data mover transfer .
  1133. */
  1134. static void msm_hs_stop_tx_locked(struct uart_port *uport)
  1135. {
  1136. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1137. struct msm_hs_tx *tx = &msm_uport->tx;
  1138. tx->flush = FLUSH_STOP;
  1139. }
  1140. static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
  1141. {
  1142. struct msm_hs_rx *rx = &msm_uport->rx;
  1143. struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
  1144. int ret = 0;
  1145. ret = sps_rx_disconnect(sps_pipe_handle);
  1146. if (msm_uport->rx.pending_flag)
  1147. MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
  1148. __func__, msm_uport->rx.pending_flag);
  1149. MSM_HS_DBG("%s(): clearing desc usage flag", __func__);
  1150. msm_uport->rx.queued_flag = 0;
  1151. msm_uport->rx.pending_flag = 0;
  1152. msm_uport->rx.rx_inx = 0;
  1153. if (ret)
  1154. MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
  1155. msm_uport->rx.flush = FLUSH_SHUTDOWN;
  1156. MSM_HS_DBG("%s: Calling Completion\n", __func__);
  1157. wake_up(&msm_uport->bam_disconnect_wait);
  1158. MSM_HS_DBG("%s: Done Completion\n", __func__);
  1159. wake_up(&msm_uport->rx.wait);
  1160. return ret;
  1161. }
  1162. static int sps_tx_disconnect(struct msm_hs_port *msm_uport)
  1163. {
  1164. struct uart_port *uport = &msm_uport->uport;
  1165. struct msm_hs_tx *tx = &msm_uport->tx;
  1166. struct sps_pipe *tx_pipe = tx->cons.pipe_handle;
  1167. unsigned long flags;
  1168. int ret = 0;
  1169. if (msm_uport->tx.flush == FLUSH_SHUTDOWN) {
  1170. MSM_HS_DBG("%s(): pipe already disonnected", __func__);
  1171. return ret;
  1172. }
  1173. ret = sps_disconnect(tx_pipe);
  1174. if (ret) {
  1175. MSM_HS_ERR("%s(): sps_disconnect failed %d", __func__, ret);
  1176. return ret;
  1177. }
  1178. spin_lock_irqsave(&uport->lock, flags);
  1179. msm_uport->tx.flush = FLUSH_SHUTDOWN;
  1180. spin_unlock_irqrestore(&uport->lock, flags);
  1181. MSM_HS_DBG("%s(): TX Disconnect", __func__);
  1182. return ret;
  1183. }
  1184. static void msm_hs_disable_rx(struct uart_port *uport)
  1185. {
  1186. unsigned int data;
  1187. data = msm_hs_read(uport, UART_DM_DMEN);
  1188. data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
  1189. msm_hs_write(uport, UART_DM_DMEN, data);
  1190. }
  1191. /*
  1192. * Standard API, Stop receiver as soon as possible.
  1193. *
  1194. * Function immediately terminates the operation of the
channel receiver and any incoming characters are lost. None
of the receiver status bits are affected by this command, and
characters that are already in the receive FIFO remain there.
  1198. */
  1199. static void msm_hs_stop_rx_locked(struct uart_port *uport)
  1200. {
  1201. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1202. if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
  1203. MSM_HS_WARN("%s(): Clocks are off\n", __func__);
  1204. else
  1205. msm_hs_disable_rx(uport);
  1206. if (msm_uport->rx.flush == FLUSH_NONE)
  1207. msm_uport->rx.flush = FLUSH_STOP;
  1208. }
  1209. static void msm_hs_disconnect_rx(struct uart_port *uport)
  1210. {
  1211. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1212. msm_hs_disable_rx(uport);
  1213. /* Disconnect the BAM RX pipe */
  1214. if (msm_uport->rx.flush == FLUSH_NONE)
  1215. msm_uport->rx.flush = FLUSH_STOP;
  1216. disconnect_rx_endpoint(msm_uport);
  1217. MSM_HS_DBG("%s(): rx->flush %d", __func__, msm_uport->rx.flush);
  1218. }
  1219. /* Tx timeout callback function */
  1220. void tx_timeout_handler(unsigned long arg)
  1221. {
  1222. struct msm_hs_port *msm_uport = (struct msm_hs_port *) arg;
  1223. struct uart_port *uport = &msm_uport->uport;
  1224. int isr;
  1225. if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
  1226. MSM_HS_WARN("%s(): clocks are off", __func__);
  1227. return;
  1228. }
  1229. isr = msm_hs_read(uport, UART_DM_ISR);
  1230. if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
  1231. MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
  1232. dump_uart_hs_registers(msm_uport);
  1233. }
  1234. /* Transmit the next chunk of data */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	int aligned_tx_count;
	dma_addr_t src_addr;
	dma_addr_t aligned_src_addr;
	u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
	struct sps_pipe *sps_pipe_handle;
	int ret;

	/* Nothing to send (or tty stopped): mark TX idle and stop */
	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		tx->dma_in_flight = false;
		msm_hs_stop_tx_locked(uport);
		return;
	}

	/* Clamp to one DMA buffer, and never wrap past the circ-buf end */
	tx_count = uart_circ_chars_pending(tx_buf);
	if (tx_count > UARTDM_TX_BUF_SIZE)
		tx_count = UARTDM_TX_BUF_SIZE;
	left = UART_XMIT_SIZE - tx_buf->tail;
	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	/* Mask the src_addr to align on a cache
	 * and add those bytes to tx_count
	 */
	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
	aligned_tx_count = tx_count + src_addr - aligned_src_addr;

	/* Flush CPU caches for the (alignment-padded) region to be DMAed */
	dma_sync_single_for_device(uport->dev, aligned_src_addr,
			aligned_tx_count, DMA_TO_DEVICE);

	tx->tx_count = tx_count;
	hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
			&tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
	sps_pipe_handle = tx->cons.pipe_handle;

	/* Set 1 second timeout */
	mod_timer(&tx->tx_timeout_timer,
			jiffies + msecs_to_jiffies(MSEC_PER_SEC));

	/* Queue transfer request to SPS */
	ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
			msm_uport, flags);
	MSM_HS_DBG("%s:Enqueue Tx Cmd, ret %d\n", __func__, ret);
}
  1279. /* This function queues the rx descriptor for BAM transfer */
  1280. static void msm_hs_post_rx_desc(struct msm_hs_port *msm_uport, int inx)
  1281. {
  1282. u32 flags = SPS_IOVEC_FLAG_INT;
  1283. struct msm_hs_rx *rx = &msm_uport->rx;
  1284. int ret;
  1285. phys_addr_t rbuff_addr = rx->rbuffer + (UARTDM_RX_BUF_SIZE * inx);
  1286. u8 *virt_addr = rx->buffer + (UARTDM_RX_BUF_SIZE * inx);
  1287. MSM_HS_DBG("%s: %d:Queue desc %d, 0x%llx, base 0x%llx virtaddr %p",
  1288. __func__, msm_uport->uport.line, inx,
  1289. (u64)rbuff_addr, (u64)rx->rbuffer, virt_addr);
  1290. rx->iovec[inx].size = 0;
  1291. ret = sps_transfer_one(rx->prod.pipe_handle, rbuff_addr,
  1292. UARTDM_RX_BUF_SIZE, msm_uport, flags);
  1293. if (ret)
  1294. MSM_HS_ERR("Error processing descriptor %d", ret);
  1295. }
  1296. /* Update the rx descriptor index to specify the next one to be processed */
  1297. static void msm_hs_mark_next(struct msm_hs_port *msm_uport, int inx)
  1298. {
  1299. struct msm_hs_rx *rx = &msm_uport->rx;
  1300. int prev;
  1301. inx %= UART_DMA_DESC_NR;
  1302. MSM_HS_DBG("%s(): inx %d, pending 0x%lx", __func__, inx,
  1303. rx->pending_flag);
  1304. if (!inx)
  1305. prev = UART_DMA_DESC_NR - 1;
  1306. else
  1307. prev = inx - 1;
  1308. if (!test_bit(prev, &rx->pending_flag))
  1309. msm_uport->rx.rx_inx = inx;
  1310. MSM_HS_DBG("%s(): prev %d pending flag 0x%lx, next %d", __func__,
  1311. prev, rx->pending_flag, msm_uport->rx.rx_inx);
  1312. }
  1313. /*
  1314. * Queue the rx descriptor that has just been processed or
  1315. * all of them if queueing for the first time
  1316. */
  1317. static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport)
  1318. {
  1319. struct msm_hs_rx *rx = &msm_uport->rx;
  1320. int i, flag = 0;
  1321. /* At first, queue all, if not, queue only one */
  1322. if (rx->queued_flag || rx->pending_flag) {
  1323. if (!test_bit(rx->rx_inx, &rx->queued_flag) &&
  1324. !test_bit(rx->rx_inx, &rx->pending_flag)) {
  1325. msm_hs_post_rx_desc(msm_uport, rx->rx_inx);
  1326. set_bit(rx->rx_inx, &rx->queued_flag);
  1327. MSM_HS_DBG("%s(): Set Queued Bit %d",
  1328. __func__, rx->rx_inx);
  1329. } else
  1330. MSM_HS_ERR("%s(): rx_inx pending or queued", __func__);
  1331. return;
  1332. }
  1333. for (i = 0; i < UART_DMA_DESC_NR; i++) {
  1334. if (!test_bit(i, &rx->queued_flag) &&
  1335. !test_bit(i, &rx->pending_flag)) {
  1336. MSM_HS_DBG("%s(): Calling post rx %d", __func__, i);
  1337. msm_hs_post_rx_desc(msm_uport, i);
  1338. set_bit(i, &rx->queued_flag);
  1339. flag = 1;
  1340. }
  1341. }
  1342. if (!flag)
  1343. MSM_HS_ERR("%s(): error queueing descriptor", __func__);
  1344. }
  1345. /* Start to receive the next chunk of data */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
	unsigned int data;

	/* Hardware access requires active clocks */
	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
		return;
	}
	/* A previous RX command completed but was not yet consumed */
	if (rx->pending_flag) {
		MSM_HS_INFO("%s: Rx Cmd got executed, wait for rx_tlet\n",
			__func__);
		rx->flush = FLUSH_IGNORE;
		return;
	}
	if (buffer_pending)
		MSM_HS_ERR("Error: rx started in buffer state =%x",
			buffer_pending);

	/* Arm the stale-event machinery and program the transfer size */
	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
	msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
	/*
	 * Enable UARTDM Rx Interface as previously it has been
	 * disable in set_termios before configuring baud rate.
	 */
	data = msm_hs_read(uport, UART_DM_DMEN);
	/* Enable UARTDM Rx BAM Interface */
	data |= UARTDM_RX_BAM_ENABLE_BMSK;
	msm_hs_write(uport, UART_DM_DMEN, data);
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	/*
	 * RX-transfer will be automatically re-activated
	 * after last data of previous transfer was read.
	 */
	data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
		RX_DMRX_CYCLIC_EN);
	msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);

	/* Issue RX BAM Start IFC command */
	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
	/* Ensure register IO completion */
	mb();

	/* Hand the descriptor ring to the BAM and mark RX active */
	msm_uport->rx.flush = FLUSH_NONE;
	msm_uport->rx_bam_inprogress = true;
	msm_hs_queue_rx_desc(msm_uport);
	msm_uport->rx_bam_inprogress = false;
	wake_up(&msm_uport->rx.wait);
	MSM_HS_DBG("%s:Enqueue Rx Cmd\n", __func__);
}
/*
 * Delayed-work retry path used when the tty flip buffer was full:
 * re-attempts to push the error flags and leftover characters that
 * were parked in rx.buffer_pending, reschedules itself while anything
 * is still pending, and restarts RX once everything has drained.
 */
static void flip_insert_work(struct work_struct *work)
{
	unsigned long flags;
	int retval;
	struct msm_hs_port *msm_uport =
		container_of(work, struct msm_hs_port,
			     rx.flip_insert_work.work);
	struct tty_struct *tty = msm_uport->uport.state->port.tty;

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	/* Port gone or shut down under us - nothing can be delivered */
	if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
		dev_err(msm_uport->uport.dev,
			"%s:Invalid driver state flush %d\n",
			__func__, msm_uport->rx.flush);
		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
			__func__, msm_uport->rx.flush);
		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
		return;
	}
	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
		/* Work ran with nothing parked - unexpected */
		MSM_HS_ERR("Error: No buffer pending in %s", __func__);
		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
		return;
	}
	/* Retry the parked error characters first; clear each flag on success */
	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
		retval = tty_insert_flip_char(tty->port, 0, TTY_OVERRUN);
		if (retval)
			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
	}
	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
		retval = tty_insert_flip_char(tty->port, 0, TTY_PARITY);
		if (retval)
			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
	}
	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
		int rx_count, rx_offset;

		/* Upper 16 bits hold the leftover char count */
		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
		/*
		 * NOTE(review): the decode mask 0xFFD0 (not 0xFFE0) and the
		 * re-encode below (retval << 8, while msm_serial_hs_rx_work
		 * encodes retval << 5) look mutually inconsistent - confirm
		 * the intended offset bit layout.
		 */
		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
		retval = tty_insert_flip_string(tty->port,
			msm_uport->rx.buffer +
			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)
			+ rx_offset, rx_count);
		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
						 PARITY_ERROR);
		if (retval != rx_count)
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 8 | (rx_count - retval) << 16;
	}
	if (msm_uport->rx.buffer_pending) {
		/* tty still congested - try again later */
		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
				      msecs_to_jiffies(RETRY_TIMEOUT));
	} else if (msm_uport->rx.flush <= FLUSH_IGNORE) {
		MSM_HS_WARN("Pending buffers cleared, restarting");
		clear_bit(msm_uport->rx.rx_inx,
			  &msm_uport->rx.pending_flag);
		msm_hs_start_rx_locked(&msm_uport->uport);
		msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
	}
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
	/* Push outside the lock; may re-enter the driver */
	tty_flip_buffer_push(tty->port);
}
/*
 * RX kthread worker: drains completed BAM descriptors into the tty
 * layer, reports overrun/parity/break line conditions, and re-queues
 * the descriptors.  When the tty flip buffer is full, leftovers are
 * parked in rx.buffer_pending and retried later by flip_insert_work().
 */
static void msm_serial_hs_rx_work(struct kthread_work *work)
{
	int retval;
	int rx_count = 0;
	unsigned long status;
	unsigned long flags;
	unsigned int error_f = 0;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;
	unsigned int flush = FLUSH_DATA_INVALID;
	struct tty_struct *tty;
	struct sps_event_notify *notify;
	struct msm_hs_rx *rx;
	struct sps_pipe *sps_pipe_handle;
	struct platform_device *pdev;
	const struct msm_serial_hs_platform_data *pdata;

	msm_uport = container_of((struct kthread_work *) work,
				 struct msm_hs_port, rx.kwork);
	msm_hs_resource_vote(msm_uport);
	uport = &msm_uport->uport;
	tty = uport->state->port.tty;
	notify = &msm_uport->notify;
	rx = &msm_uport->rx;
	/*
	 * NOTE(review): pdev, pdata, notify and sps_pipe_handle are
	 * fetched/assigned but never actually used in this function.
	 */
	pdev = to_platform_device(uport->dev);
	pdata = pdev->dev.platform_data;

	spin_lock_irqsave(&uport->lock, flags);
	/* Port gone or shut down - nothing can be delivered */
	if (!tty || rx->flush == FLUSH_SHUTDOWN) {
		dev_err(uport->dev, "%s:Invalid driver state flush %d\n",
			__func__, rx->flush);
		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
			__func__, rx->flush);
		spin_unlock_irqrestore(&uport->lock, flags);
		msm_hs_resource_unvote(msm_uport);
		return;
	}
	/*
	 * Process all pending descs or if nothing is
	 * queued - called from termios
	 */
	while (!rx->buffer_pending &&
		(rx->pending_flag || !rx->queued_flag)) {
		MSM_HS_DBG("%s(): Loop P 0x%lx Q 0x%lx", __func__,
			rx->pending_flag, rx->queued_flag);
		status = msm_hs_read(uport, UART_DM_SR);
		MSM_HS_DBG("In %s\n", __func__);
		/* overflow is not connect to data in a FIFO */
		if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
			     (uport->read_status_mask & CREAD))) {
			retval = tty_insert_flip_char(tty->port,
							0, TTY_OVERRUN);
			MSM_HS_WARN("%s(): RX Buffer Overrun Detected\n",
				__func__);
			/*
			 * NOTE(review): on failure this ORs the tty flag
			 * value TTY_OVERRUN into the bit-flag style
			 * buffer_pending (elsewhere FIFO_OVERRUN is used) -
			 * confirm the intended encoding.
			 */
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_OVERRUN;
			uport->icount.buf_overrun++;
			error_f = 1;
		}
		if (!(uport->ignore_status_mask & INPCK))
			status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
		if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
			/* Can not tell diff between parity & frame error */
			MSM_HS_WARN("msm_serial_hs: parity error\n");
			uport->icount.parity++;
			error_f = 1;
			if (!(uport->ignore_status_mask & IGNPAR)) {
				retval = tty_insert_flip_char(tty->port,
							0, TTY_PARITY);
				if (!retval)
					msm_uport->rx.buffer_pending
						|= TTY_PARITY;
			}
		}
		if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
			MSM_HS_DBG("msm_serial_hs: Rx break\n");
			uport->icount.brk++;
			error_f = 1;
			if (!(uport->ignore_status_mask & IGNBRK)) {
				retval = tty_insert_flip_char(tty->port,
							0, TTY_BREAK);
				if (!retval)
					msm_uport->rx.buffer_pending
						|= TTY_BREAK;
			}
		}
		/* Clear latched error status once any condition was seen */
		if (error_f)
			msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
		flush = msm_uport->rx.flush;
		if (flush == FLUSH_IGNORE)
			if (!msm_uport->rx.buffer_pending) {
				MSM_HS_DBG("%s: calling start_rx_locked\n",
					__func__);
				msm_hs_start_rx_locked(uport);
			}
		if (flush >= FLUSH_DATA_INVALID)
			goto out;
		rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
		hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
			(msm_uport->rx.buffer +
			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
			msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
			rx_count);
		/*
		 * We are in a spin locked context, spin lock taken at
		 * other places where these flags are updated
		 */
		if (0 != (uport->read_status_mask & CREAD)) {
			/* Deliver only a descriptor that is pending and not queued */
			if (!test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag) &&
			    !test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.queued_flag))
				MSM_HS_ERR("%s: RX INX not set", __func__);
			else if (test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag) &&
				!test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.queued_flag)) {
				MSM_HS_DBG("%s(): Clear Pending Bit %d",
					__func__, msm_uport->rx.rx_inx);
				retval = tty_insert_flip_string(tty->port,
					msm_uport->rx.buffer +
					(msm_uport->rx.rx_inx *
					UARTDM_RX_BUF_SIZE),
					rx_count);
				/* Partial push: park the remainder for retry */
				if (retval != rx_count) {
					MSM_HS_INFO("%s(): ret %d rx_count %d",
						__func__, retval, rx_count);
					msm_uport->rx.buffer_pending |=
					CHARS_NORMAL | retval << 5 |
					(rx_count - retval) << 16;
				}
			} else
				MSM_HS_ERR("%s: Error in inx %d", __func__,
					msm_uport->rx.rx_inx);
		}
		if (!msm_uport->rx.buffer_pending) {
			/* Fully delivered: re-queue this descriptor and advance */
			msm_uport->rx.flush = FLUSH_NONE;
			msm_uport->rx_bam_inprogress = true;
			sps_pipe_handle = rx->prod.pipe_handle;
			MSM_HS_DBG("Queing bam descriptor\n");
			/* Queue transfer request to SPS */
			clear_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag);
			msm_hs_queue_rx_desc(msm_uport);
			msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
			msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
			msm_uport->rx_bam_inprogress = false;
			wake_up(&msm_uport->rx.wait);
		} else
			break;
	}
out:
	if (msm_uport->rx.buffer_pending) {
		MSM_HS_WARN("%s: tty buffer exhausted. Stalling\n", __func__);
		schedule_delayed_work(&msm_uport->rx.flip_insert_work
				, msecs_to_jiffies(RETRY_TIMEOUT));
	}
	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
	spin_unlock_irqrestore(&uport->lock, flags);
	if (flush < FLUSH_DATA_INVALID)
		tty_flip_buffer_push(tty->port);
	msm_hs_resource_unvote(msm_uport);
}
  1618. static void msm_hs_start_tx_locked(struct uart_port *uport)
  1619. {
  1620. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1621. struct msm_hs_tx *tx = &msm_uport->tx;
  1622. /* Bail if transfer in progress */
  1623. if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
  1624. MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
  1625. __func__, tx->flush, tx->dma_in_flight);
  1626. return;
  1627. }
  1628. if (!tx->dma_in_flight) {
  1629. tx->dma_in_flight = true;
  1630. kthread_queue_work(&msm_uport->tx.kworker,
  1631. &msm_uport->tx.kwork);
  1632. }
  1633. }
  1634. /**
  1635. * Callback notification from SPS driver
  1636. *
  1637. * This callback function gets triggered called from
  1638. * SPS driver when requested SPS data transfer is
  1639. * completed.
  1640. *
  1641. */
  1642. static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
  1643. {
  1644. struct msm_hs_port *msm_uport =
  1645. (struct msm_hs_port *)
  1646. ((struct sps_event_notify *)notify)->user;
  1647. phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
  1648. notify->data.transfer.iovec.addr);
  1649. msm_uport->notify = *notify;
  1650. MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
  1651. &addr, notify->data.transfer.iovec.size,
  1652. notify->data.transfer.iovec.flags);
  1653. del_timer(&msm_uport->tx.tx_timeout_timer);
  1654. MSM_HS_DBG("%s(): Queue kthread work", __func__);
  1655. kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
  1656. }
  1657. static void msm_serial_hs_tx_work(struct kthread_work *work)
  1658. {
  1659. unsigned long flags;
  1660. struct msm_hs_port *msm_uport =
  1661. container_of((struct kthread_work *)work,
  1662. struct msm_hs_port, tx.kwork);
  1663. struct uart_port *uport = &msm_uport->uport;
  1664. struct circ_buf *tx_buf = &uport->state->xmit;
  1665. struct msm_hs_tx *tx = &msm_uport->tx;
  1666. /*
  1667. * Do the work buffer related work in BAM
  1668. * mode that is equivalent to legacy mode
  1669. */
  1670. msm_hs_resource_vote(msm_uport);
  1671. if (tx->flush >= FLUSH_STOP) {
  1672. spin_lock_irqsave(&(msm_uport->uport.lock), flags);
  1673. tx->flush = FLUSH_NONE;
  1674. MSM_HS_DBG("%s(): calling submit_tx", __func__);
  1675. msm_hs_submit_tx_locked(uport);
  1676. spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
  1677. msm_hs_resource_unvote(msm_uport);
  1678. return;
  1679. }
  1680. spin_lock_irqsave(&(msm_uport->uport.lock), flags);
  1681. if (!uart_circ_empty(tx_buf))
  1682. tx_buf->tail = (tx_buf->tail +
  1683. tx->tx_count) & ~UART_XMIT_SIZE;
  1684. else
  1685. MSM_HS_DBG("%s:circ buffer is empty\n", __func__);
  1686. wake_up(&msm_uport->tx.wait);
  1687. uport->icount.tx += tx->tx_count;
  1688. /*
  1689. * Calling to send next chunk of data
  1690. * If the circ buffer is empty, we stop
  1691. * If the clock off was requested, the clock
  1692. * off sequence is kicked off
  1693. */
  1694. MSM_HS_DBG("%s(): calling submit_tx", __func__);
  1695. msm_hs_submit_tx_locked(uport);
  1696. if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
  1697. uart_write_wakeup(uport);
  1698. spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
  1699. msm_hs_resource_unvote(msm_uport);
  1700. }
/*
 * Mark one rx descriptor as completed by the BAM: translate the
 * notified physical address back to a descriptor index, flip the slot
 * from "queued" to "pending" and stash its iovec for the rx worker.
 * Caller must hold uport->lock.
 */
static void
msm_hs_mark_proc_rx_desc(struct msm_hs_port *msm_uport,
			struct sps_event_notify *notify)
{
	struct msm_hs_rx *rx = &msm_uport->rx;
	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
		notify->data.transfer.iovec.addr);
	/* divide by UARTDM_RX_BUF_SIZE (>> 9 assumes it is 512 - TODO confirm) */
	int inx = (addr - rx->rbuffer) >> 9;

	set_bit(inx, &rx->pending_flag);
	clear_bit(inx, &rx->queued_flag);
	rx->iovec[inx] = notify->data.transfer.iovec;
	MSM_HS_DBG("Clear Q, Set P Bit %d, Q 0x%lx P 0x%lx",
	inx, rx->queued_flag, rx->pending_flag);
}
  1716. /**
  1717. * Callback notification from SPS driver
  1718. *
  1719. * This callback function gets triggered called from
  1720. * SPS driver when requested SPS data transfer is
  1721. * completed.
  1722. *
  1723. */
  1724. static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
  1725. {
  1726. struct msm_hs_port *msm_uport =
  1727. (struct msm_hs_port *)
  1728. ((struct sps_event_notify *)notify)->user;
  1729. struct uart_port *uport;
  1730. unsigned long flags;
  1731. struct msm_hs_rx *rx = &msm_uport->rx;
  1732. phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
  1733. notify->data.transfer.iovec.addr);
  1734. /* divide by UARTDM_RX_BUF_SIZE */
  1735. int inx = (addr - rx->rbuffer) >> 9;
  1736. uport = &(msm_uport->uport);
  1737. msm_uport->notify = *notify;
  1738. MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
  1739. &addr, notify->data.transfer.iovec.size,
  1740. notify->data.transfer.iovec.flags);
  1741. spin_lock_irqsave(&uport->lock, flags);
  1742. msm_hs_mark_proc_rx_desc(msm_uport, notify);
  1743. spin_unlock_irqrestore(&uport->lock, flags);
  1744. if (msm_uport->rx.flush == FLUSH_NONE) {
  1745. /* Test if others are queued */
  1746. if (msm_uport->rx.pending_flag & ~(1 << inx)) {
  1747. MSM_HS_DBG("%s(): inx 0x%x, 0x%lx not processed",
  1748. __func__, inx,
  1749. msm_uport->rx.pending_flag & ~(1<<inx));
  1750. }
  1751. kthread_queue_work(&msm_uport->rx.kworker,
  1752. &msm_uport->rx.kwork);
  1753. MSM_HS_DBG("%s(): Scheduled rx_tlet", __func__);
  1754. }
  1755. }
  1756. /*
  1757. * Standard API, Current states of modem control inputs
  1758. *
  1759. * Since CTS can be handled entirely by HARDWARE we always
  1760. * indicate clear to send and count on the TX FIFO to block when
  1761. * it fills up.
  1762. *
  1763. * - TIOCM_DCD
  1764. * - TIOCM_CTS
  1765. * - TIOCM_DSR
  1766. * - TIOCM_RI
  1767. * (Unsupported) DCD and DSR will return them high. RI will return low.
  1768. */
  1769. static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
  1770. {
  1771. return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
  1772. }
  1773. /*
  1774. * Standard API, Set or clear RFR_signal
  1775. *
  1776. * Set RFR high, (Indicate we are not ready for data), we disable auto
  1777. * ready for receiving and then set RFR_N high. To set RFR to low we just turn
  1778. * back auto ready for receiving and it should lower RFR signal
  1779. * when hardware is ready
  1780. */
  1781. void msm_hs_set_mctrl_locked(struct uart_port *uport,
  1782. unsigned int mctrl)
  1783. {
  1784. unsigned int set_rts;
  1785. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1786. if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
  1787. MSM_HS_WARN("%s(): Clocks are off\n", __func__);
  1788. return;
  1789. }
  1790. /* RTS is active low */
  1791. set_rts = TIOCM_RTS & mctrl ? 0 : 1;
  1792. MSM_HS_INFO("%s: set_rts %d\n", __func__, set_rts);
  1793. if (set_rts)
  1794. msm_hs_disable_flow_control(uport, false);
  1795. else
  1796. msm_hs_enable_flow_control(uport, false);
  1797. }
  1798. void msm_hs_set_mctrl(struct uart_port *uport,
  1799. unsigned int mctrl)
  1800. {
  1801. unsigned long flags;
  1802. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  1803. msm_hs_resource_vote(msm_uport);
  1804. spin_lock_irqsave(&uport->lock, flags);
  1805. msm_hs_set_mctrl_locked(uport, mctrl);
  1806. spin_unlock_irqrestore(&uport->lock, flags);
  1807. msm_hs_resource_unvote(msm_uport);
  1808. }
  1809. EXPORT_SYMBOL(msm_hs_set_mctrl);
/*
 * Standard API, Enable modem status (CTS) interrupt.
 * Caller must hold uport->lock; the port must be clocked.
 */
static void msm_hs_enable_ms_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Cannot program the interrupt mask while unclocked */
	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
		return;
	}
	/* Enable DELTA_CTS Interrupt */
	msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/* Ensure register IO completion */
	mb();
}
/*
 * Standard API, Break Signal
 *
 * Control the transmission of a break signal. ctl eq 0 => break
 * signal terminate ctl ne 0 => start break signal
 */
static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
{
	unsigned long flags;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Keep the core clocked while issuing the break command */
	msm_hs_resource_vote(msm_uport);
	spin_lock_irqsave(&uport->lock, flags);
	msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
	/* Ensure register IO completion */
	mb();
	spin_unlock_irqrestore(&uport->lock, flags);
	msm_hs_resource_unvote(msm_uport);
}
  1842. static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
  1843. {
  1844. if (cfg_flags & UART_CONFIG_TYPE)
  1845. uport->type = PORT_MSM;
  1846. }
/* Handle CTS changes (Called from interrupt handler) */
static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/*
	 * NOTE(review): called from msm_hs_isr() with the port spinlock
	 * held - confirm msm_hs_resource_vote() is safe to call from
	 * atomic context.
	 */
	msm_hs_resource_vote(msm_uport);
	/* clear interrupt */
	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
	/* Calling CLOCK API. Hence mb() requires here. */
	mb();
	uport->icount.cts++;
	/* clear the IOCTL TIOCMIWAIT if called */
	wake_up_interruptible(&uport->state->port.delta_msr_wait);
	msm_hs_resource_unvote(msm_uport);
}
/*
 * Main UART interrupt handler: dispatches on the masked interrupt
 * status (RX level, RX stale, TX ready, TX level, delta-CTS).
 */
static irqreturn_t msm_hs_isr(int irq, void *dev)
{
	unsigned long flags;
	unsigned int isr_status;
	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
	struct uart_port *uport = &msm_uport->uport;
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct msm_hs_tx *tx = &msm_uport->tx;

	spin_lock_irqsave(&uport->lock, flags);
	isr_status = msm_hs_read(uport, UART_DM_MISR);
	MSM_HS_INFO("%s: DM_ISR: 0x%x\n", __func__, isr_status);
	dump_uart_hs_registers(msm_uport);

	/* Uart RX starting: mask further RXLEV interrupts */
	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
		MSM_HS_DBG("%s:UARTDM_ISR_RXLEV_BMSK\n", __func__);
		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
		/* Complete device write for IMR. Hence mb() requires. */
		mb();
	}
	/* Stale rx interrupt: acknowledge and disable the stale event */
	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
		msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
		msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
		/*
		 * Complete device write before calling DMOV API. Hence
		 * mb() requires here.
		 */
		mb();
		MSM_HS_DBG("%s:Stal Interrupt\n", __func__);
	}
	/* tx ready interrupt */
	if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
		MSM_HS_DBG("%s: ISR_TX_READY Interrupt\n", __func__);
		/* Clear TX Ready */
		msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);
		/*
		 * Complete both writes before starting new TX.
		 * Hence mb() requires here.
		 */
		mb();
		/* Complete DMA TX transactions and submit new transactions */
		/* Do not update tx_buf.tail if uart_flush_buffer already
		 * called in serial core
		 */
		/*
		 * NOTE(review): "& ~UART_XMIT_SIZE" clears a single bit and
		 * only equals a modulo wrap while tail + tx_count stays
		 * below twice the buffer size; the canonical mask is
		 * (UART_XMIT_SIZE - 1) - confirm and align.
		 */
		if (!uart_circ_empty(tx_buf))
			tx_buf->tail = (tx_buf->tail +
				tx->tx_count) & ~UART_XMIT_SIZE;
		tx->dma_in_flight = false;
		uport->icount.tx += tx->tx_count;
		if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
			uart_write_wakeup(uport);
	}
	if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
		/* TX FIFO is empty */
		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
		MSM_HS_DBG("%s: TXLEV Interrupt\n", __func__);
		/*
		 * Complete device write before starting clock_off request.
		 * Hence mb() requires here.
		 */
		mb();
		queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
	}
	/* Change in CTS interrupt */
	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
		msm_hs_handle_delta_cts_locked(uport);

	spin_unlock_irqrestore(&uport->lock, flags);

	return IRQ_HANDLED;
}
  1932. /* The following two functions provide interfaces to get the underlying
  1933. * port structure (struct uart_port or struct msm_hs_port) given
  1934. * the port index. msm_hs_get_uart port is called by clients.
  1935. * The function msm_hs_get_hs_port is for internal use
  1936. */
  1937. struct uart_port *msm_hs_get_uart_port(int port_index)
  1938. {
  1939. struct uart_state *state = msm_hs_driver.state + port_index;
  1940. /* The uart_driver structure stores the states in an array.
  1941. * Thus the corresponding offset from the drv->state returns
  1942. * the state for the uart_port that is requested
  1943. */
  1944. if (port_index == state->uart_port->line)
  1945. return state->uart_port;
  1946. return NULL;
  1947. }
  1948. EXPORT_SYMBOL(msm_hs_get_uart_port);
  1949. static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
  1950. {
  1951. struct uart_port *uport = msm_hs_get_uart_port(port_index);
  1952. if (uport)
  1953. return UARTDM_TO_MSM(uport);
  1954. return NULL;
  1955. }
  1956. void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
  1957. {
  1958. unsigned long flags;
  1959. struct uart_port *uport = &(msm_uport->uport);
  1960. if (!is_use_low_power_wakeup(msm_uport))
  1961. return;
  1962. if (msm_uport->wakeup.freed)
  1963. return;
  1964. if (!(msm_uport->wakeup.enabled)) {
  1965. spin_lock_irqsave(&uport->lock, flags);
  1966. msm_uport->wakeup.ignore = 1;
  1967. msm_uport->wakeup.enabled = true;
  1968. spin_unlock_irqrestore(&uport->lock, flags);
  1969. disable_irq(uport->irq);
  1970. enable_irq(msm_uport->wakeup.irq);
  1971. } else {
  1972. MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
  1973. }
  1974. }
  1975. void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
  1976. {
  1977. unsigned long flags;
  1978. struct uart_port *uport = &(msm_uport->uport);
  1979. if (!is_use_low_power_wakeup(msm_uport))
  1980. return;
  1981. if (msm_uport->wakeup.freed)
  1982. return;
  1983. if (msm_uport->wakeup.enabled) {
  1984. disable_irq_nosync(msm_uport->wakeup.irq);
  1985. enable_irq(uport->irq);
  1986. spin_lock_irqsave(&uport->lock, flags);
  1987. msm_uport->wakeup.enabled = false;
  1988. spin_unlock_irqrestore(&uport->lock, flags);
  1989. } else {
  1990. MSM_HS_WARN("%s:Wake up IRQ already disabled", __func__);
  1991. }
  1992. }
/*
 * Quiesce the UART before its resources are dropped: disconnect RX,
 * wait for in-flight TX to stop, then disable the TX BAM interface
 * and disconnect its SPS pipe.
 */
void msm_hs_resource_off(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);
	unsigned int data;

	MSM_HS_DBG("%s(): begin", __func__);
	msm_hs_disable_flow_control(uport, false);
	if (msm_uport->rx.flush == FLUSH_NONE)
		msm_hs_disconnect_rx(uport);
	/* disable dlink */
	if (msm_uport->tx.flush == FLUSH_NONE)
		/* NOTE(review): timeout is in jiffies (500 ticks), not ms */
		wait_event_timeout(msm_uport->tx.wait,
				msm_uport->tx.flush == FLUSH_STOP, 500);
	if (msm_uport->tx.flush != FLUSH_SHUTDOWN) {
		data = msm_hs_read(uport, UART_DM_DMEN);
		data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
		msm_hs_write(uport, UART_DM_DMEN, data);
		sps_tx_disconnect(msm_uport);
	}
	/* Re-assert flow control unless a client explicitly asked it off */
	if (!atomic_read(&msm_uport->client_req_state))
		msm_hs_enable_flow_control(uport, false);
}
/*
 * Counterpart of msm_hs_resource_off(): re-enable the RX BAM
 * interface and reconnect the SPS pipes after resources come back.
 */
void msm_hs_resource_on(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);
	unsigned int data;
	unsigned long flags;

	if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
	    msm_uport->rx.flush == FLUSH_STOP) {
		/* RX was torn down: reset it and re-enable the BAM i/f */
		msm_hs_write(uport, UART_DM_CR, RESET_RX);
		data = msm_hs_read(uport, UART_DM_DMEN);
		data |= UARTDM_RX_BAM_ENABLE_BMSK;
		msm_hs_write(uport, UART_DM_DMEN, data);
	}
	msm_hs_spsconnect_tx(msm_uport);
	if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
		/* Fully shut down: reconnect the pipe and restart RX */
		msm_hs_spsconnect_rx(uport);
		spin_lock_irqsave(&uport->lock, flags);
		msm_hs_start_rx_locked(uport);
		spin_unlock_irqrestore(&uport->lock, flags);
	}
}
  2034. /* Request to turn off uart clock once pending TX is flushed */
  2035. int msm_hs_request_clock_off(struct uart_port *uport)
  2036. {
  2037. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2038. int ret = 0;
  2039. int client_count = 0;
  2040. mutex_lock(&msm_uport->mtx);
  2041. /*
  2042. * If we're in the middle of a system suspend, don't process these
  2043. * userspace/kernel API commands.
  2044. */
  2045. if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
  2046. MSM_HS_WARN("%s:Can't process clk request during suspend",
  2047. __func__);
  2048. ret = -EIO;
  2049. }
  2050. mutex_unlock(&msm_uport->mtx);
  2051. if (ret)
  2052. goto exit_request_clock_off;
  2053. if (atomic_read(&msm_uport->client_count) <= 0) {
  2054. MSM_HS_WARN("%s(): ioctl count -ve, client check voting",
  2055. __func__);
  2056. ret = -EPERM;
  2057. goto exit_request_clock_off;
  2058. }
  2059. /* Set the flag to disable flow control and wakeup irq */
  2060. if (msm_uport->obs)
  2061. atomic_set(&msm_uport->client_req_state, 1);
  2062. msm_hs_resource_unvote(msm_uport);
  2063. atomic_dec(&msm_uport->client_count);
  2064. client_count = atomic_read(&msm_uport->client_count);
  2065. LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
  2066. "%s: Client_Count %d\n", __func__,
  2067. client_count);
  2068. exit_request_clock_off:
  2069. return ret;
  2070. }
  2071. EXPORT_SYMBOL(msm_hs_request_clock_off);
  2072. int msm_hs_request_clock_on(struct uart_port *uport)
  2073. {
  2074. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2075. int client_count;
  2076. int ret = 0;
  2077. mutex_lock(&msm_uport->mtx);
  2078. /*
  2079. * If we're in the middle of a system suspend, don't process these
  2080. * userspace/kernel API commands.
  2081. */
  2082. if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
  2083. MSM_HS_WARN("%s:Can't process clk request during suspend",
  2084. __func__);
  2085. ret = -EIO;
  2086. }
  2087. mutex_unlock(&msm_uport->mtx);
  2088. if (ret)
  2089. goto exit_request_clock_on;
  2090. msm_hs_resource_vote(UARTDM_TO_MSM(uport));
  2091. atomic_inc(&msm_uport->client_count);
  2092. client_count = atomic_read(&msm_uport->client_count);
  2093. LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
  2094. "%s: Client_Count %d\n", __func__,
  2095. client_count);
  2096. /* Clear the flag */
  2097. if (msm_uport->obs)
  2098. atomic_set(&msm_uport->client_req_state, 0);
  2099. exit_request_clock_on:
  2100. return ret;
  2101. }
  2102. EXPORT_SYMBOL(msm_hs_request_clock_on);
/*
 * Wakeup interrupt handler, active while the UART core is clocked
 * off.  Optionally injects one predefined character into the tty so
 * the byte that triggered the wakeup is not lost.  The first
 * interrupt after enabling is skipped (wakeup.ignore handshake with
 * enable_wakeup_interrupt()).
 */
static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
{
	unsigned int wakeup = 0;
	unsigned long flags;
	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
	struct uart_port *uport = &msm_uport->uport;
	struct tty_struct *tty = NULL;

	spin_lock_irqsave(&uport->lock, flags);
	if (msm_uport->wakeup.ignore)
		msm_uport->wakeup.ignore = 0;
	else
		wakeup = 1;

	if (wakeup) {
		/*
		 * Port was clocked off during rx, wake up and
		 * optionally inject char into tty rx
		 */
		if (msm_uport->wakeup.inject_rx) {
			tty = uport->state->port.tty;
			tty_insert_flip_char(tty->port,
					msm_uport->wakeup.rx_to_inject,
					TTY_NORMAL);
			hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
				"Rx Inject",
				&msm_uport->wakeup.rx_to_inject, 0, 1);
			MSM_HS_INFO("Wakeup ISR.Ignore%d\n",
				msm_uport->wakeup.ignore);
		}
	}
	spin_unlock_irqrestore(&uport->lock, flags);

	/* Push outside the lock; tty work may re-enter the driver */
	if (wakeup && msm_uport->wakeup.inject_rx)
		tty_flip_buffer_push(tty->port);
	return IRQ_HANDLED;
}
  2137. static const char *msm_hs_type(struct uart_port *port)
  2138. {
  2139. return "MSM HS UART";
  2140. }
  2141. /**
  2142. * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
  2143. * @uport: uart port
  2144. */
  2145. static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
  2146. {
  2147. struct platform_device *pdev = to_platform_device(uport->dev);
  2148. const struct msm_serial_hs_platform_data *pdata =
  2149. pdev->dev.platform_data;
  2150. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2151. int ret;
  2152. if (msm_uport->use_pinctrl) {
  2153. ret = pinctrl_select_state(msm_uport->pinctrl,
  2154. msm_uport->gpio_state_suspend);
  2155. if (ret)
  2156. MSM_HS_ERR("%s():Failed to pinctrl set_state",
  2157. __func__);
  2158. } else if (pdata) {
  2159. if (gpio_is_valid(pdata->uart_tx_gpio))
  2160. gpio_free(pdata->uart_tx_gpio);
  2161. if (gpio_is_valid(pdata->uart_rx_gpio))
  2162. gpio_free(pdata->uart_rx_gpio);
  2163. if (gpio_is_valid(pdata->uart_cts_gpio))
  2164. gpio_free(pdata->uart_cts_gpio);
  2165. if (gpio_is_valid(pdata->uart_rfr_gpio))
  2166. gpio_free(pdata->uart_rfr_gpio);
  2167. } else
  2168. MSM_HS_ERR("Error:Pdata is NULL.\n");
  2169. }
  2170. /**
  2171. * msm_hs_config_uart_gpios - Configures UART GPIOs
  2172. * @uport: uart port
  2173. */
  2174. static int msm_hs_config_uart_gpios(struct uart_port *uport)
  2175. {
  2176. struct platform_device *pdev = to_platform_device(uport->dev);
  2177. const struct msm_serial_hs_platform_data *pdata =
  2178. pdev->dev.platform_data;
  2179. int ret = 0;
  2180. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2181. if (!IS_ERR_OR_NULL(msm_uport->pinctrl)) {
  2182. MSM_HS_DBG("%s(): Using Pinctrl", __func__);
  2183. msm_uport->use_pinctrl = true;
  2184. ret = pinctrl_select_state(msm_uport->pinctrl,
  2185. msm_uport->gpio_state_active);
  2186. if (ret)
  2187. MSM_HS_ERR("%s(): Failed to pinctrl set_state",
  2188. __func__);
  2189. return ret;
  2190. } else if (pdata) {
  2191. /* Fall back to using gpio lib */
  2192. if (gpio_is_valid(pdata->uart_tx_gpio)) {
  2193. ret = gpio_request(pdata->uart_tx_gpio,
  2194. "UART_TX_GPIO");
  2195. if (unlikely(ret)) {
  2196. MSM_HS_ERR("gpio request failed for:%d\n",
  2197. pdata->uart_tx_gpio);
  2198. goto exit_uart_config;
  2199. }
  2200. }
  2201. if (gpio_is_valid(pdata->uart_rx_gpio)) {
  2202. ret = gpio_request(pdata->uart_rx_gpio,
  2203. "UART_RX_GPIO");
  2204. if (unlikely(ret)) {
  2205. MSM_HS_ERR("gpio request failed for:%d\n",
  2206. pdata->uart_rx_gpio);
  2207. goto uart_tx_unconfig;
  2208. }
  2209. }
  2210. if (gpio_is_valid(pdata->uart_cts_gpio)) {
  2211. ret = gpio_request(pdata->uart_cts_gpio,
  2212. "UART_CTS_GPIO");
  2213. if (unlikely(ret)) {
  2214. MSM_HS_ERR("gpio request failed for:%d\n",
  2215. pdata->uart_cts_gpio);
  2216. goto uart_rx_unconfig;
  2217. }
  2218. }
  2219. if (gpio_is_valid(pdata->uart_rfr_gpio)) {
  2220. ret = gpio_request(pdata->uart_rfr_gpio,
  2221. "UART_RFR_GPIO");
  2222. if (unlikely(ret)) {
  2223. MSM_HS_ERR("gpio request failed for:%d\n",
  2224. pdata->uart_rfr_gpio);
  2225. goto uart_cts_unconfig;
  2226. }
  2227. }
  2228. } else {
  2229. MSM_HS_ERR("Pdata is NULL.\n");
  2230. ret = -EINVAL;
  2231. }
  2232. return ret;
  2233. uart_cts_unconfig:
  2234. if (gpio_is_valid(pdata->uart_cts_gpio))
  2235. gpio_free(pdata->uart_cts_gpio);
  2236. uart_rx_unconfig:
  2237. if (gpio_is_valid(pdata->uart_rx_gpio))
  2238. gpio_free(pdata->uart_rx_gpio);
  2239. uart_tx_unconfig:
  2240. if (gpio_is_valid(pdata->uart_tx_gpio))
  2241. gpio_free(pdata->uart_tx_gpio);
  2242. exit_uart_config:
  2243. return ret;
  2244. }
  2245. static void msm_hs_get_pinctrl_configs(struct uart_port *uport)
  2246. {
  2247. struct pinctrl_state *set_state;
  2248. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2249. msm_uport->pinctrl = devm_pinctrl_get(uport->dev);
  2250. if (IS_ERR_OR_NULL(msm_uport->pinctrl)) {
  2251. MSM_HS_DBG("%s(): Pinctrl not defined", __func__);
  2252. } else {
  2253. MSM_HS_DBG("%s(): Using Pinctrl", __func__);
  2254. msm_uport->use_pinctrl = true;
  2255. set_state = pinctrl_lookup_state(msm_uport->pinctrl,
  2256. PINCTRL_STATE_DEFAULT);
  2257. if (IS_ERR_OR_NULL(set_state)) {
  2258. dev_err(uport->dev,
  2259. "pinctrl lookup failed for default state");
  2260. goto pinctrl_fail;
  2261. }
  2262. MSM_HS_DBG("%s(): Pinctrl state active %p\n", __func__,
  2263. set_state);
  2264. msm_uport->gpio_state_active = set_state;
  2265. set_state = pinctrl_lookup_state(msm_uport->pinctrl,
  2266. PINCTRL_STATE_SLEEP);
  2267. if (IS_ERR_OR_NULL(set_state)) {
  2268. dev_err(uport->dev,
  2269. "pinctrl lookup failed for sleep state");
  2270. goto pinctrl_fail;
  2271. }
  2272. MSM_HS_DBG("%s(): Pinctrl state sleep %p\n", __func__,
  2273. set_state);
  2274. msm_uport->gpio_state_suspend = set_state;
  2275. return;
  2276. }
  2277. pinctrl_fail:
  2278. msm_uport->pinctrl = NULL;
  2279. }
  2280. /* Called when port is opened */
  2281. static int msm_hs_startup(struct uart_port *uport)
  2282. {
  2283. int ret;
  2284. int rfr_level;
  2285. unsigned long flags;
  2286. unsigned int data;
  2287. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2288. struct circ_buf *tx_buf = &uport->state->xmit;
  2289. struct msm_hs_tx *tx = &msm_uport->tx;
  2290. struct msm_hs_rx *rx = &msm_uport->rx;
  2291. struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
  2292. struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
  2293. rfr_level = uport->fifosize;
  2294. if (rfr_level > 16)
  2295. rfr_level -= 16;
  2296. tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
  2297. DMA_TO_DEVICE);
  2298. /* turn on uart clk */
  2299. msm_hs_resource_vote(msm_uport);
  2300. /* Set up Uart Receive */
  2301. msm_hs_write(uport, UART_DM_RFWR, 32);
  2302. /* Write to BADR explicitly to set up FIFO sizes */
  2303. msm_hs_write(uport, UARTDM_BADR_ADDR, 64);
  2304. /* configure the CR Protection to Enable */
  2305. msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
  2306. /*
  2307. * Enable Command register protection before going ahead as this hw
  2308. * configuration makes sure that issued cmd to CR register gets complete
  2309. * before next issued cmd start. Hence mb() requires here.
  2310. */
  2311. mb();
  2312. /*
  2313. * Set RX_BREAK_ZERO_CHAR_OFF and RX_ERROR_CHAR_OFF
  2314. * so any rx_break and character having parity of framing
  2315. * error don't enter inside UART RX FIFO.
  2316. */
  2317. data = msm_hs_read(uport, UART_DM_MR2);
  2318. data |= (UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF |
  2319. UARTDM_MR2_RX_ERROR_CHAR_OFF);
  2320. msm_hs_write(uport, UART_DM_MR2, data);
  2321. /* Ensure register IO completion */
  2322. mb();
  2323. if (is_use_low_power_wakeup(msm_uport)) {
  2324. ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
  2325. msm_hs_wakeup_isr,
  2326. IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
  2327. "msm_hs_wakeup", msm_uport);
  2328. if (unlikely(ret)) {
  2329. MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
  2330. __func__, ret);
  2331. goto unvote_exit;
  2332. }
  2333. msm_uport->wakeup.freed = false;
  2334. disable_irq(msm_uport->wakeup.irq);
  2335. msm_uport->wakeup.enabled = false;
  2336. ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
  2337. if (unlikely(ret)) {
  2338. MSM_HS_ERR("%s():Err setting wakeup irq\n", __func__);
  2339. goto free_uart_irq;
  2340. }
  2341. }
  2342. ret = msm_hs_config_uart_gpios(uport);
  2343. if (ret) {
  2344. MSM_HS_ERR("Uart GPIO request failed\n");
  2345. goto free_uart_irq;
  2346. }
  2347. msm_hs_write(uport, UART_DM_DMEN, 0);
  2348. /* Connect TX */
  2349. sps_tx_disconnect(msm_uport);
  2350. ret = msm_hs_spsconnect_tx(msm_uport);
  2351. if (ret) {
  2352. MSM_HS_ERR("msm_serial_hs: SPS connect failed for TX");
  2353. goto unconfig_uart_gpios;
  2354. }
  2355. /* Connect RX */
  2356. kthread_flush_worker(&msm_uport->rx.kworker);
  2357. if (rx->flush != FLUSH_SHUTDOWN)
  2358. disconnect_rx_endpoint(msm_uport);
  2359. ret = msm_hs_spsconnect_rx(uport);
  2360. if (ret) {
  2361. MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX");
  2362. goto sps_disconnect_tx;
  2363. }
  2364. data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
  2365. UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
  2366. UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
  2367. msm_hs_write(uport, UART_DM_BCR, data);
  2368. /* Set auto RFR Level */
  2369. data = msm_hs_read(uport, UART_DM_MR1);
  2370. data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
  2371. data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
  2372. data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
  2373. data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
  2374. msm_hs_write(uport, UART_DM_MR1, data);
  2375. /* Make sure RXSTALE count is non-zero */
  2376. data = msm_hs_read(uport, UART_DM_IPR);
  2377. if (!data) {
  2378. data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
  2379. msm_hs_write(uport, UART_DM_IPR, data);
  2380. }
  2381. /* Assume no flow control, unless termios sets it */
  2382. msm_uport->flow_control = false;
  2383. msm_hs_disable_flow_control(uport, true);
  2384. /* Reset TX */
  2385. msm_hs_write(uport, UART_DM_CR, RESET_TX);
  2386. msm_hs_write(uport, UART_DM_CR, RESET_RX);
  2387. msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
  2388. msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
  2389. msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
  2390. msm_hs_write(uport, UART_DM_CR, RESET_CTS);
  2391. msm_hs_write(uport, UART_DM_CR, RFR_LOW);
  2392. /* Turn on Uart Receiver */
  2393. msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
  2394. /* Turn on Uart Transmitter */
  2395. msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
  2396. tx->dma_in_flight = false;
  2397. MSM_HS_DBG("%s():desc usage flag 0x%lx", __func__, rx->queued_flag);
  2398. setup_timer(&(tx->tx_timeout_timer),
  2399. tx_timeout_handler,
  2400. (unsigned long) msm_uport);
  2401. /* Enable reading the current CTS, no harm even if CTS is ignored */
  2402. msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
  2403. /* TXLEV on empty TX fifo */
  2404. msm_hs_write(uport, UART_DM_TFWR, 4);
  2405. /*
  2406. * Complete all device write related configuration before
  2407. * queuing RX request. Hence mb() requires here.
  2408. */
  2409. mb();
  2410. ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
  2411. "msm_hs_uart", msm_uport);
  2412. if (unlikely(ret)) {
  2413. MSM_HS_ERR("%s():Error %d getting uart irq\n", __func__, ret);
  2414. goto sps_disconnect_rx;
  2415. }
  2416. spin_lock_irqsave(&uport->lock, flags);
  2417. atomic_set(&msm_uport->client_count, 0);
  2418. atomic_set(&msm_uport->client_req_state, 0);
  2419. LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
  2420. "%s: Client_Count 0\n", __func__);
  2421. msm_hs_start_rx_locked(uport);
  2422. spin_unlock_irqrestore(&uport->lock, flags);
  2423. msm_hs_resource_unvote(msm_uport);
  2424. return 0;
  2425. sps_disconnect_rx:
  2426. sps_disconnect(sps_pipe_handle_rx);
  2427. sps_disconnect_tx:
  2428. sps_disconnect(sps_pipe_handle_tx);
  2429. unconfig_uart_gpios:
  2430. msm_hs_unconfig_uart_gpios(uport);
  2431. free_uart_irq:
  2432. free_irq(uport->irq, msm_uport);
  2433. unvote_exit:
  2434. msm_hs_resource_unvote(msm_uport);
  2435. MSM_HS_ERR("%s(): Error return\n", __func__);
  2436. return ret;
  2437. }
  2438. /* Initialize tx and rx data structures */
  2439. static int uartdm_init_port(struct uart_port *uport)
  2440. {
  2441. int ret = 0;
  2442. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2443. struct msm_hs_tx *tx = &msm_uport->tx;
  2444. struct msm_hs_rx *rx = &msm_uport->rx;
  2445. struct sched_param param = { .sched_priority = 1 };
  2446. init_waitqueue_head(&rx->wait);
  2447. init_waitqueue_head(&tx->wait);
  2448. init_waitqueue_head(&msm_uport->bam_disconnect_wait);
  2449. /* Init kernel threads for tx and rx */
  2450. kthread_init_worker(&rx->kworker);
  2451. rx->task = kthread_run(kthread_worker_fn,
  2452. &rx->kworker, "msm_serial_hs_%d_rx_work", uport->line);
  2453. if (IS_ERR(rx->task)) {
  2454. MSM_HS_ERR("%s(): error creating task", __func__);
  2455. goto exit_lh_init;
  2456. }
  2457. sched_setscheduler(rx->task, SCHED_FIFO, &param);
  2458. kthread_init_work(&rx->kwork, msm_serial_hs_rx_work);
  2459. kthread_init_worker(&tx->kworker);
  2460. tx->task = kthread_run(kthread_worker_fn,
  2461. &tx->kworker, "msm_serial_hs_%d_tx_work", uport->line);
  2462. if (IS_ERR(rx->task)) {
  2463. MSM_HS_ERR("%s(): error creating task", __func__);
  2464. goto exit_lh_init;
  2465. }
  2466. sched_setscheduler(tx->task, SCHED_FIFO, &param);
  2467. kthread_init_work(&tx->kwork, msm_serial_hs_tx_work);
  2468. rx->buffer = dma_alloc_coherent(uport->dev,
  2469. UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
  2470. &rx->rbuffer, GFP_KERNEL);
  2471. if (!rx->buffer) {
  2472. MSM_HS_ERR("%s(): cannot allocate rx->buffer", __func__);
  2473. ret = -ENOMEM;
  2474. goto exit_lh_init;
  2475. }
  2476. INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
  2477. return ret;
  2478. exit_lh_init:
  2479. kthread_stop(rx->task);
  2480. rx->task = NULL;
  2481. kthread_stop(tx->task);
  2482. tx->task = NULL;
  2483. return ret;
  2484. }
  2485. struct msm_serial_hs_platform_data
  2486. *msm_hs_dt_to_pdata(struct platform_device *pdev)
  2487. {
  2488. struct device_node *node = pdev->dev.of_node;
  2489. struct msm_serial_hs_platform_data *pdata;
  2490. u32 rx_to_inject;
  2491. int ret;
  2492. pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
  2493. if (!pdata)
  2494. return ERR_PTR(-ENOMEM);
  2495. pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
  2496. /* UART TX GPIO */
  2497. pdata->uart_tx_gpio = of_get_named_gpio(node,
  2498. "qcom,tx-gpio", 0);
  2499. if (pdata->uart_tx_gpio < 0)
  2500. pr_err("uart_tx_gpio is not available\n");
  2501. /* UART RX GPIO */
  2502. pdata->uart_rx_gpio = of_get_named_gpio(node,
  2503. "qcom,rx-gpio", 0);
  2504. if (pdata->uart_rx_gpio < 0)
  2505. pr_err("uart_rx_gpio is not available\n");
  2506. /* UART CTS GPIO */
  2507. pdata->uart_cts_gpio = of_get_named_gpio(node,
  2508. "qcom,cts-gpio", 0);
  2509. if (pdata->uart_cts_gpio < 0)
  2510. pr_err("uart_cts_gpio is not available\n");
  2511. /* UART RFR GPIO */
  2512. pdata->uart_rfr_gpio = of_get_named_gpio(node,
  2513. "qcom,rfr-gpio", 0);
  2514. if (pdata->uart_rfr_gpio < 0)
  2515. pr_err("uart_rfr_gpio is not available\n");
  2516. pdata->no_suspend_delay = of_property_read_bool(node,
  2517. "qcom,no-suspend-delay");
  2518. pdata->obs = of_property_read_bool(node,
  2519. "qcom,msm-obs");
  2520. if (pdata->obs)
  2521. pr_err("%s:Out of Band sleep flag is set\n", __func__);
  2522. pdata->inject_rx_on_wakeup = of_property_read_bool(node,
  2523. "qcom,inject-rx-on-wakeup");
  2524. if (pdata->inject_rx_on_wakeup) {
  2525. ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
  2526. &rx_to_inject);
  2527. if (ret < 0) {
  2528. pr_err("Error: Rx_char_to_inject not specified.\n");
  2529. return ERR_PTR(ret);
  2530. }
  2531. pdata->rx_to_inject = (u8)rx_to_inject;
  2532. }
  2533. ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
  2534. &pdata->bam_tx_ep_pipe_index);
  2535. if (ret < 0) {
  2536. pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
  2537. return ERR_PTR(ret);
  2538. }
  2539. if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
  2540. pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
  2541. pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
  2542. return ERR_PTR(-EINVAL);
  2543. }
  2544. ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
  2545. &pdata->bam_rx_ep_pipe_index);
  2546. if (ret < 0) {
  2547. pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
  2548. return ERR_PTR(ret);
  2549. }
  2550. if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
  2551. pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
  2552. pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
  2553. return ERR_PTR(-EINVAL);
  2554. }
  2555. pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
  2556. "tx_gpio:%d rx_gpio:%d rfr_gpio:%d cts_gpio:%d",
  2557. pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
  2558. pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
  2559. pdata->uart_rfr_gpio);
  2560. return pdata;
  2561. }
  2562. /**
  2563. * Deallocate UART peripheral's SPS endpoint
  2564. * @msm_uport - Pointer to msm_hs_port structure
  2565. * @ep - Pointer to sps endpoint data structure
  2566. */
  2567. static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
  2568. struct msm_hs_sps_ep_conn_data *ep)
  2569. {
  2570. struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
  2571. struct sps_connect *sps_config = &ep->config;
  2572. dma_free_coherent(msm_uport->uport.dev,
  2573. sps_config->desc.size,
  2574. &sps_config->desc.phys_base,
  2575. GFP_KERNEL);
  2576. sps_free_endpoint(sps_pipe_handle);
  2577. }
  2578. /**
  2579. * Allocate UART peripheral's SPS endpoint
  2580. *
  2581. * This function allocates endpoint context
  2582. * by calling appropriate SPS driver APIs.
  2583. *
  2584. * @msm_uport - Pointer to msm_hs_port structure
  2585. * @ep - Pointer to sps endpoint data structure
  2586. * @is_produce - 1 means Producer endpoint
  2587. * - 0 means Consumer endpoint
  2588. *
  2589. * @return - 0 if successful else negative value
  2590. */
  2591. static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
  2592. struct msm_hs_sps_ep_conn_data *ep,
  2593. bool is_producer)
  2594. {
  2595. int rc = 0;
  2596. struct sps_pipe *sps_pipe_handle;
  2597. struct sps_connect *sps_config = &ep->config;
  2598. struct sps_register_event *sps_event = &ep->event;
  2599. /* Allocate endpoint context */
  2600. sps_pipe_handle = sps_alloc_endpoint();
  2601. if (!sps_pipe_handle) {
  2602. MSM_HS_ERR("%s(): sps_alloc_endpoint() failed!!\n"
  2603. "is_producer=%d", __func__, is_producer);
  2604. rc = -ENOMEM;
  2605. goto out;
  2606. }
  2607. /* Get default connection configuration for an endpoint */
  2608. rc = sps_get_config(sps_pipe_handle, sps_config);
  2609. if (rc) {
  2610. MSM_HS_ERR("%s(): failed! pipe_handle=0x%p rc=%d",
  2611. __func__, sps_pipe_handle, rc);
  2612. goto get_config_err;
  2613. }
  2614. /* Modify the default connection configuration */
  2615. if (is_producer) {
  2616. /* For UART producer transfer, source is UART peripheral
  2617. * where as destination is system memory
  2618. */
  2619. sps_config->source = msm_uport->bam_handle;
  2620. sps_config->destination = SPS_DEV_HANDLE_MEM;
  2621. sps_config->mode = SPS_MODE_SRC;
  2622. sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
  2623. sps_config->dest_pipe_index = 0;
  2624. sps_event->callback = msm_hs_sps_rx_callback;
  2625. } else {
  2626. /* For UART consumer transfer, source is system memory
  2627. * where as destination is UART peripheral
  2628. */
  2629. sps_config->source = SPS_DEV_HANDLE_MEM;
  2630. sps_config->destination = msm_uport->bam_handle;
  2631. sps_config->mode = SPS_MODE_DEST;
  2632. sps_config->src_pipe_index = 0;
  2633. sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
  2634. sps_event->callback = msm_hs_sps_tx_callback;
  2635. }
  2636. sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
  2637. sps_config->event_thresh = 0x10;
  2638. /* Allocate maximum descriptor fifo size */
  2639. sps_config->desc.size =
  2640. (1 + UART_DMA_DESC_NR) * sizeof(struct sps_iovec);
  2641. sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
  2642. sps_config->desc.size,
  2643. &sps_config->desc.phys_base,
  2644. GFP_KERNEL);
  2645. if (!sps_config->desc.base) {
  2646. rc = -ENOMEM;
  2647. MSM_HS_ERR("msm_serial_hs: dma_alloc_coherent() failed!!\n");
  2648. goto get_config_err;
  2649. }
  2650. memset(sps_config->desc.base, 0x00, sps_config->desc.size);
  2651. sps_event->mode = SPS_TRIGGER_CALLBACK;
  2652. sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
  2653. sps_event->user = (void *)msm_uport;
  2654. /* Now save the sps pipe handle */
  2655. ep->pipe_handle = sps_pipe_handle;
  2656. MSM_HS_DBG("msm_serial_hs: success !! %s: pipe_handle=0x%p\n"
  2657. "desc_fifo.phys_base=0x%pa\n",
  2658. is_producer ? "READ" : "WRITE",
  2659. sps_pipe_handle, &sps_config->desc.phys_base);
  2660. return 0;
  2661. get_config_err:
  2662. sps_free_endpoint(sps_pipe_handle);
  2663. out:
  2664. return rc;
  2665. }
  2666. /**
  2667. * Initialize SPS HW connected with UART core
  2668. *
  2669. * This function register BAM HW resources with
  2670. * SPS driver and then initialize 2 SPS endpoints
  2671. *
  2672. * msm_uport - Pointer to msm_hs_port structure
  2673. *
  2674. * @return - 0 if successful else negative value
  2675. */
  2676. static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
  2677. {
  2678. int rc = 0;
  2679. struct sps_bam_props bam = {0};
  2680. unsigned long bam_handle;
  2681. rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
  2682. if (rc || !bam_handle) {
  2683. bam.phys_addr = msm_uport->bam_mem;
  2684. bam.virt_addr = msm_uport->bam_base;
  2685. /*
  2686. * This event thresold value is only significant for BAM-to-BAM
  2687. * transfer. It's ignored for BAM-to-System mode transfer.
  2688. */
  2689. bam.event_threshold = 0x10; /* Pipe event threshold */
  2690. bam.summing_threshold = 1; /* BAM event threshold */
  2691. /* SPS driver wll handle the UART BAM IRQ */
  2692. bam.irq = (u32)msm_uport->bam_irq;
  2693. bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
  2694. MSM_HS_DBG("msm_serial_hs: bam physical base=0x%pa\n",
  2695. &bam.phys_addr);
  2696. MSM_HS_DBG("msm_serial_hs: bam virtual base=0x%p\n",
  2697. bam.virt_addr);
  2698. /* Register UART Peripheral BAM device to SPS driver */
  2699. rc = sps_register_bam_device(&bam, &bam_handle);
  2700. if (rc) {
  2701. MSM_HS_ERR("%s: BAM device register failed\n",
  2702. __func__);
  2703. return rc;
  2704. }
  2705. MSM_HS_DBG("%s:BAM device registered. bam_handle=0x%lx",
  2706. __func__, msm_uport->bam_handle);
  2707. }
  2708. msm_uport->bam_handle = bam_handle;
  2709. rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
  2710. UART_SPS_PROD_PERIPHERAL);
  2711. if (rc) {
  2712. MSM_HS_ERR("%s: Failed to Init Producer BAM-pipe", __func__);
  2713. goto deregister_bam;
  2714. }
  2715. rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
  2716. UART_SPS_CONS_PERIPHERAL);
  2717. if (rc) {
  2718. MSM_HS_ERR("%s: Failed to Init Consumer BAM-pipe", __func__);
  2719. goto deinit_ep_conn_prod;
  2720. }
  2721. return 0;
  2722. deinit_ep_conn_prod:
  2723. msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
  2724. deregister_bam:
  2725. sps_deregister_bam_device(msm_uport->bam_handle);
  2726. return rc;
  2727. }
/* Tracks which UARTDM device numbers are currently claimed */
static bool deviceid[UARTDM_NR] = {0};
/*
 * The mutex synchronizes grabbing next free device number
 * both in case of an alias being used or not. When alias is
 * used, the msm_hs_dt_to_pdata gets it and the boolean array
 * is accordingly updated with device_id_set_used. If no alias
 * is used, then device_id_grab_next_free sets that array.
 */
static DEFINE_MUTEX(mutex_next_device_id);
  2737. static int device_id_grab_next_free(void)
  2738. {
  2739. int i;
  2740. int ret = -ENODEV;
  2741. mutex_lock(&mutex_next_device_id);
  2742. for (i = 0; i < UARTDM_NR; i++)
  2743. if (!deviceid[i]) {
  2744. ret = i;
  2745. deviceid[i] = true;
  2746. break;
  2747. }
  2748. mutex_unlock(&mutex_next_device_id);
  2749. return ret;
  2750. }
  2751. static int device_id_set_used(int index)
  2752. {
  2753. int ret = 0;
  2754. mutex_lock(&mutex_next_device_id);
  2755. if (deviceid[index])
  2756. ret = -ENODEV;
  2757. else
  2758. deviceid[index] = true;
  2759. mutex_unlock(&mutex_next_device_id);
  2760. return ret;
  2761. }
  2762. static void obs_manage_irq(struct msm_hs_port *msm_uport, bool en)
  2763. {
  2764. struct uart_port *uport = &(msm_uport->uport);
  2765. if (msm_uport->obs) {
  2766. if (en)
  2767. enable_irq(uport->irq);
  2768. else
  2769. disable_irq(uport->irq);
  2770. }
  2771. }
/*
 * msm_hs_pm_suspend - runtime-suspend path: power the port down.
 *
 * Marks the port suspended, turns resources off, drops the clock/bus
 * vote and arms the wakeup interrupt (unless a userspace clock request
 * is pending).  The order is deliberate: state flag first, then
 * resources off, then the wakeup source — restyle with care.
 */
static void msm_hs_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
	int ret;
	int client_count = 0;

	if (!msm_uport)
		goto err_suspend;
	mutex_lock(&msm_uport->mtx);

	client_count = atomic_read(&msm_uport->client_count);
	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
	msm_hs_resource_off(msm_uport);
	obs_manage_irq(msm_uport, false);
	msm_hs_clk_bus_unvote(msm_uport);

	/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
	if (msm_uport->obs) {
		ret = pinctrl_select_state(msm_uport->pinctrl,
					   msm_uport->gpio_state_suspend);
		if (ret)
			MSM_HS_ERR("%s():Error selecting pinctrl suspend state",
				   __func__);
	}

	/* Arm the wakeup irq only if no client still expects the clocks on */
	if (!atomic_read(&msm_uport->client_req_state))
		enable_wakeup_interrupt(msm_uport);
	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
		    "%s: PM State Suspended client_count %d\n", __func__,
		    client_count);
	mutex_unlock(&msm_uport->mtx);
	return;
err_suspend:
	pr_err("%s(): invalid uport", __func__);
}
/*
 * msm_hs_pm_resume - runtime-resume path: power the port back up.
 *
 * Mirrors msm_hs_pm_suspend(): disarm the wakeup interrupt, restore
 * pads (OBS mode), re-vote clocks/bus, then re-enable the irq and mark
 * the port active.  Returns 0 on success or the clock-vote error.
 */
static int msm_hs_pm_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
	int ret = 0;
	int client_count = 0;

	if (!msm_uport) {
		dev_err(dev, "%s:Invalid uport\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&msm_uport->mtx);
	client_count = atomic_read(&msm_uport->client_count);
	/* Already powered: nothing to do */
	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
		goto exit_pm_resume;
	if (!atomic_read(&msm_uport->client_req_state))
		disable_wakeup_interrupt(msm_uport);

	/* For OBS, don't use wakeup interrupt, set gpio to active state */
	if (msm_uport->obs) {
		ret = pinctrl_select_state(msm_uport->pinctrl,
					   msm_uport->gpio_state_active);
		if (ret)
			MSM_HS_ERR("%s():Error selecting active state",
				   __func__);
	}

	ret = msm_hs_clk_bus_vote(msm_uport);
	if (ret) {
		MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
		dev_err(dev, "%s:Failed clock vote %d\n", __func__, ret);
		goto exit_pm_resume;
	}
	obs_manage_irq(msm_uport, true);
	msm_uport->pm_state = MSM_HS_PM_ACTIVE;
	msm_hs_resource_on(msm_uport);

	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
		    "%s:PM State:Active client_count %d\n", __func__, client_count);
exit_pm_resume:
	mutex_unlock(&msm_uport->mtx);
	return ret;
}
  2843. #ifdef CONFIG_PM
  2844. static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
  2845. {
  2846. struct platform_device *pdev = to_platform_device(dev);
  2847. struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
  2848. enum msm_hs_pm_state prev_pwr_state;
  2849. int clk_cnt, client_count, ret = 0;
  2850. if (IS_ERR_OR_NULL(msm_uport))
  2851. return -ENODEV;
  2852. mutex_lock(&msm_uport->mtx);
  2853. /*
  2854. * If there is an active clk request or an impending userspace request
  2855. * fail the suspend callback.
  2856. */
  2857. clk_cnt = atomic_read(&msm_uport->resource_count);
  2858. client_count = atomic_read(&msm_uport->client_count);
  2859. if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
  2860. MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
  2861. __func__, clk_cnt, client_count);
  2862. ret = -EBUSY;
  2863. goto exit_suspend_noirq;
  2864. }
  2865. prev_pwr_state = msm_uport->pm_state;
  2866. msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
  2867. LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
  2868. "%s:PM State:Sys-Suspended client_count %d\n", __func__,
  2869. client_count);
  2870. exit_suspend_noirq:
  2871. mutex_unlock(&msm_uport->mtx);
  2872. return ret;
  2873. };
/*
 * msm_hs_pm_sys_resume_noirq - system-resume (noirq phase) callback.
 *
 * Only downgrades the state from SYS_SUSPENDED to (runtime) SUSPENDED;
 * clocks and pads are brought up lazily on the next transfer request.
 * Always returns 0.
 */
static int msm_hs_pm_sys_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);

	if (IS_ERR_OR_NULL(msm_uport))
		return -ENODEV;
	/*
	 * Note system-pm resume and update the state
	 * variable. Resource activation will be done
	 * when transfer is requested.
	 */
	mutex_lock(&msm_uport->mtx);
	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
		msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
		    "%s:PM State: Suspended\n", __func__);
	mutex_unlock(&msm_uport->mtx);
	return 0;
}
  2893. #endif
  2894. #ifdef CONFIG_PM
  2895. static void msm_serial_hs_rt_init(struct uart_port *uport)
  2896. {
  2897. struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
  2898. MSM_HS_INFO("%s(): Enabling runtime pm", __func__);
  2899. pm_runtime_set_suspended(uport->dev);
  2900. pm_runtime_set_autosuspend_delay(uport->dev, 100);
  2901. pm_runtime_use_autosuspend(uport->dev);
  2902. mutex_lock(&msm_uport->mtx);
  2903. msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
  2904. mutex_unlock(&msm_uport->mtx);
  2905. pm_runtime_enable(uport->dev);
  2906. tty_port_set_policy(&uport->state->port, SCHED_FIFO, 1);
  2907. }
  2908. static int msm_hs_runtime_suspend(struct device *dev)
  2909. {
  2910. msm_hs_pm_suspend(dev);
  2911. return 0;
  2912. }
  2913. static int msm_hs_runtime_resume(struct device *dev)
  2914. {
  2915. return msm_hs_pm_resume(dev);
  2916. }
  2917. #else
  2918. static void msm_serial_hs_rt_init(struct uart_port *uport) {}
  2919. static int msm_hs_runtime_suspend(struct device *dev) {}
  2920. static int msm_hs_runtime_resume(struct device *dev) {}
  2921. #endif
  2922. static int msm_hs_probe(struct platform_device *pdev)
  2923. {
  2924. int ret = 0;
  2925. struct uart_port *uport;
  2926. struct msm_hs_port *msm_uport;
  2927. struct resource *core_resource;
  2928. struct resource *bam_resource;
  2929. int core_irqres, bam_irqres, wakeup_irqres;
  2930. struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
  2931. char name[30];
  2932. if (pdev->dev.of_node) {
  2933. dev_dbg(&pdev->dev, "device tree enabled\n");
  2934. pdata = msm_hs_dt_to_pdata(pdev);
  2935. if (IS_ERR(pdata))
  2936. return PTR_ERR(pdata);
  2937. if (pdev->id < 0) {
  2938. pdev->id = device_id_grab_next_free();
  2939. if (pdev->id < 0) {
  2940. dev_err(&pdev->dev,
  2941. "Error grabbing next free device id");
  2942. return pdev->id;
  2943. }
  2944. } else {
  2945. ret = device_id_set_used(pdev->id);
  2946. if (ret < 0) {
  2947. dev_err(&pdev->dev, "%d alias taken",
  2948. pdev->id);
  2949. return ret;
  2950. }
  2951. }
  2952. pdev->dev.platform_data = pdata;
  2953. }
  2954. if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
  2955. dev_err(&pdev->dev, "Invalid plaform device ID = %d\n",
  2956. pdev->id);
  2957. return -EINVAL;
  2958. }
  2959. msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
  2960. GFP_KERNEL);
  2961. if (!msm_uport)
  2962. return -ENOMEM;
  2963. msm_uport->uport.type = PORT_UNKNOWN;
  2964. uport = &msm_uport->uport;
  2965. uport->dev = &pdev->dev;
  2966. if (pdev->dev.of_node)
  2967. msm_uport->uart_type = BLSP_HSUART;
  2968. msm_hs_get_pinctrl_configs(uport);
  2969. /* Get required resources for BAM HSUART */
  2970. core_resource = platform_get_resource_byname(pdev,
  2971. IORESOURCE_MEM, "core_mem");
  2972. if (!core_resource) {
  2973. dev_err(&pdev->dev, "Invalid core HSUART Resources.\n");
  2974. return -ENXIO;
  2975. }
  2976. bam_resource = platform_get_resource_byname(pdev,
  2977. IORESOURCE_MEM, "bam_mem");
  2978. if (!bam_resource) {
  2979. dev_err(&pdev->dev, "Invalid BAM HSUART Resources.\n");
  2980. return -ENXIO;
  2981. }
  2982. core_irqres = platform_get_irq_byname(pdev, "core_irq");
  2983. if (core_irqres < 0) {
  2984. dev_err(&pdev->dev, "Error %d, invalid core irq resources.\n",
  2985. core_irqres);
  2986. return -ENXIO;
  2987. }
  2988. bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
  2989. if (bam_irqres < 0) {
  2990. dev_err(&pdev->dev, "Error %d, invalid bam irq resources.\n",
  2991. bam_irqres);
  2992. return -ENXIO;
  2993. }
  2994. wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
  2995. if (wakeup_irqres < 0) {
  2996. wakeup_irqres = -1;
  2997. pr_info("Wakeup irq not specified.\n");
  2998. }
  2999. uport->mapbase = core_resource->start;
  3000. uport->membase = ioremap(uport->mapbase,
  3001. resource_size(core_resource));
  3002. if (unlikely(!uport->membase)) {
  3003. dev_err(&pdev->dev, "UART Resource ioremap Failed.\n");
  3004. return -ENOMEM;
  3005. }
  3006. msm_uport->bam_mem = bam_resource->start;
  3007. msm_uport->bam_base = ioremap(msm_uport->bam_mem,
  3008. resource_size(bam_resource));
  3009. if (unlikely(!msm_uport->bam_base)) {
  3010. dev_err(&pdev->dev, "UART BAM Resource ioremap Failed.\n");
  3011. iounmap(uport->membase);
  3012. return -ENOMEM;
  3013. }
  3014. memset(name, 0, sizeof(name));
  3015. scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
  3016. "_state");
  3017. msm_uport->ipc_msm_hs_log_ctxt =
  3018. ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
  3019. name, 0);
  3020. if (!msm_uport->ipc_msm_hs_log_ctxt) {
  3021. dev_err(&pdev->dev, "%s: error creating logging context",
  3022. __func__);
  3023. } else {
  3024. msm_uport->ipc_debug_mask = INFO_LEV;
  3025. ret = sysfs_create_file(&pdev->dev.kobj,
  3026. &dev_attr_debug_mask.attr);
  3027. if (unlikely(ret))
  3028. MSM_HS_WARN("%s: Failed to create dev. attr", __func__);
  3029. }
  3030. uport->irq = core_irqres;
  3031. msm_uport->bam_irq = bam_irqres;
  3032. pdata->wakeup_irq = wakeup_irqres;
  3033. msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
  3034. if (!msm_uport->bus_scale_table) {
  3035. MSM_HS_ERR("BLSP UART: Bus scaling is disabled.\n");
  3036. } else {
  3037. msm_uport->bus_perf_client =
  3038. msm_bus_scale_register_client
  3039. (msm_uport->bus_scale_table);
  3040. if (IS_ERR(&msm_uport->bus_perf_client)) {
  3041. MSM_HS_ERR("%s():Bus client register failed\n",
  3042. __func__);
  3043. ret = -EINVAL;
  3044. goto unmap_memory;
  3045. }
  3046. }
  3047. msm_uport->wakeup.irq = pdata->wakeup_irq;
  3048. msm_uport->wakeup.ignore = 1;
  3049. msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
  3050. msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
  3051. msm_uport->obs = pdata->obs;
  3052. msm_uport->bam_tx_ep_pipe_index =
  3053. pdata->bam_tx_ep_pipe_index;
  3054. msm_uport->bam_rx_ep_pipe_index =
  3055. pdata->bam_rx_ep_pipe_index;
  3056. msm_uport->wakeup.enabled = true;
  3057. uport->iotype = UPIO_MEM;
  3058. uport->fifosize = 64;
  3059. uport->ops = &msm_hs_ops;
  3060. uport->flags = UPF_BOOT_AUTOCONF;
  3061. uport->uartclk = 7372800;
  3062. msm_uport->imr_reg = 0x0;
  3063. msm_uport->clk = clk_get(&pdev->dev, "core_clk");
  3064. if (IS_ERR(msm_uport->clk)) {
  3065. ret = PTR_ERR(msm_uport->clk);
  3066. goto deregister_bus_client;
  3067. }
  3068. msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
  3069. /*
  3070. * Some configurations do not require explicit pclk control so
  3071. * do not flag error on pclk get failure.
  3072. */
  3073. if (IS_ERR(msm_uport->pclk))
  3074. msm_uport->pclk = NULL;
  3075. msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
  3076. WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
  3077. if (!msm_uport->hsuart_wq) {
  3078. MSM_HS_ERR("%s(): Unable to create workqueue hsuart_wq\n",
  3079. __func__);
  3080. ret = -ENOMEM;
  3081. goto put_clk;
  3082. }
  3083. mutex_init(&msm_uport->mtx);
  3084. /* Initialize SPS HW connected with UART core */
  3085. ret = msm_hs_sps_init(msm_uport);
  3086. if (unlikely(ret)) {
  3087. MSM_HS_ERR("SPS Initialization failed ! err=%d", ret);
  3088. goto destroy_mutex;
  3089. }
  3090. msm_uport->tx.flush = FLUSH_SHUTDOWN;
  3091. msm_uport->rx.flush = FLUSH_SHUTDOWN;
  3092. memset(name, 0, sizeof(name));
  3093. scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
  3094. "_tx");
  3095. msm_uport->tx.ipc_tx_ctxt =
  3096. ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
  3097. if (!msm_uport->tx.ipc_tx_ctxt)
  3098. dev_err(&pdev->dev, "%s: error creating tx logging context",
  3099. __func__);
  3100. memset(name, 0, sizeof(name));
  3101. scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
  3102. "_rx");
  3103. msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
  3104. IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
  3105. if (!msm_uport->rx.ipc_rx_ctxt)
  3106. dev_err(&pdev->dev, "%s: error creating rx logging context",
  3107. __func__);
  3108. memset(name, 0, sizeof(name));
  3109. scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
  3110. "_pwr");
  3111. msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
  3112. IPC_MSM_HS_LOG_USER_PAGES, name, 0);
  3113. if (!msm_uport->ipc_msm_hs_pwr_ctxt)
  3114. dev_err(&pdev->dev, "%s: error creating usr logging context",
  3115. __func__);
  3116. uport->irq = core_irqres;
  3117. msm_uport->bam_irq = bam_irqres;
  3118. clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
  3119. msm_hs_clk_bus_vote(msm_uport);
  3120. ret = uartdm_init_port(uport);
  3121. if (unlikely(ret))
  3122. goto err_clock;
  3123. ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
  3124. if (unlikely(ret)) {
  3125. MSM_HS_ERR("Probe Failed as sysfs failed\n");
  3126. goto err_clock;
  3127. }
  3128. msm_serial_debugfs_init(msm_uport, pdev->id);
  3129. msm_hs_unconfig_uart_gpios(uport);
  3130. uport->line = pdev->id;
  3131. if (pdata->userid && pdata->userid <= UARTDM_NR)
  3132. uport->line = pdata->userid;
  3133. ret = uart_add_one_port(&msm_hs_driver, uport);
  3134. if (!ret) {
  3135. msm_hs_clk_bus_unvote(msm_uport);
  3136. msm_serial_hs_rt_init(uport);
  3137. return ret;
  3138. }
  3139. err_clock:
  3140. msm_hs_clk_bus_unvote(msm_uport);
  3141. destroy_mutex:
  3142. mutex_destroy(&msm_uport->mtx);
  3143. destroy_workqueue(msm_uport->hsuart_wq);
  3144. put_clk:
  3145. if (msm_uport->pclk)
  3146. clk_put(msm_uport->pclk);
  3147. if (msm_uport->clk)
  3148. clk_put(msm_uport->clk);
  3149. deregister_bus_client:
  3150. msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
  3151. unmap_memory:
  3152. iounmap(uport->membase);
  3153. iounmap(msm_uport->bam_base);
  3154. return ret;
  3155. }
  3156. static int __init msm_serial_hs_init(void)
  3157. {
  3158. int ret;
  3159. ret = uart_register_driver(&msm_hs_driver);
  3160. if (unlikely(ret)) {
  3161. pr_err("%s failed to load\n", __func__);
  3162. return ret;
  3163. }
  3164. debug_base = debugfs_create_dir("msm_serial_hs", NULL);
  3165. if (IS_ERR_OR_NULL(debug_base))
  3166. pr_err("msm_serial_hs: Cannot create debugfs dir\n");
  3167. ret = platform_driver_register(&msm_serial_hs_platform_driver);
  3168. if (ret) {
  3169. pr_err("%s failed to load\n", __func__);
  3170. debugfs_remove_recursive(debug_base);
  3171. uart_unregister_driver(&msm_hs_driver);
  3172. return ret;
  3173. }
  3174. pr_info("msm_serial_hs module loaded\n");
  3175. return ret;
  3176. }
/*
 * Called by the upper layer when the port is closed.
 * - Disables the port (TX/RX disabled, IMR cleared)
 * - Unhooks the ISRs (core irq and, if used, the wakeup irq)
 * - Flushes TX/RX kthread workers and disconnects the TX BAM pipe
 * - Rebalances any leftover resource/clock votes so the port can be
 *   reopened cleanly
 *
 * NOTE(review): the statement order below is significant (irq teardown
 * before worker flush, flow control off before RX disconnect, register
 * writes before the final mb()); do not reorder.
 */
static void msm_hs_shutdown(struct uart_port *uport)
{
	int ret, rc;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct circ_buf *tx_buf = &uport->state->xmit;
	int data;
	unsigned long flags;

	/* Port is going away: stop the wakeup irq from waking the system */
	if (is_use_low_power_wakeup(msm_uport))
		irq_set_irq_wake(msm_uport->wakeup.irq, 0);

	/* Only one of the two irqs is enabled at a time; disable that one */
	if (msm_uport->wakeup.enabled)
		disable_irq(msm_uport->wakeup.irq);
	else
		disable_irq(uport->irq);

	/* Mark wakeup handling off under the port lock */
	spin_lock_irqsave(&uport->lock, flags);
	msm_uport->wakeup.enabled = false;
	msm_uport->wakeup.ignore = 1;
	spin_unlock_irqrestore(&uport->lock, flags);

	/* Free the interrupt */
	free_irq(uport->irq, msm_uport);
	if (is_use_low_power_wakeup(msm_uport)) {
		free_irq(msm_uport->wakeup.irq, msm_uport);
		MSM_HS_DBG("%s(): wakeup irq freed", __func__);
	}
	msm_uport->wakeup.freed = true;

	/* make sure tx lh finishes */
	kthread_flush_worker(&msm_uport->tx.kworker);
	/* 500 jiffies grace for the TX circ buffer to drain; warn if not */
	ret = wait_event_timeout(msm_uport->tx.wait,
			uart_circ_empty(tx_buf), 500);
	if (!ret)
		MSM_HS_WARN("Shutdown called when tx buff not empty");

	/* Hold a resource vote while touching UART registers below */
	msm_hs_resource_vote(msm_uport);
	/* Stop remote side from sending data */
	msm_hs_disable_flow_control(uport, false);
	/* make sure rx lh finishes */
	kthread_flush_worker(&msm_uport->rx.kworker);

	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
		/* disable and disconnect rx */
		ret = wait_event_timeout(msm_uport->rx.wait,
				!msm_uport->rx.pending_flag, 500);
		if (!ret)
			MSM_HS_WARN("%s(): rx disconnect not complete",
				__func__);
		msm_hs_disconnect_rx(uport);
	}

	cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
	flush_workqueue(msm_uport->hsuart_wq);

	/* BAM Disconnect for TX */
	data = msm_hs_read(uport, UART_DM_DMEN);
	data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
	msm_hs_write(uport, UART_DM_DMEN, data);
	ret = sps_tx_disconnect(msm_uport);
	if (ret)
		MSM_HS_ERR("%s(): sps_disconnect failed\n",
			__func__);
	msm_uport->tx.flush = FLUSH_SHUTDOWN;

	/* Disable the transmitter */
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
	/* Disable the receiver */
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);

	/* Mask all UART interrupts */
	msm_uport->imr_reg = 0;
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/*
	 * Complete all device write before actually disabling uartclk.
	 * Hence mb() requires here.
	 */
	mb();

	msm_uport->rx.buffer_pending = NONE_PENDING;
	MSM_HS_DBG("%s(): tx, rx events complete", __func__);

	/* Release the TX DMA mapping set up at startup */
	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
			UART_XMIT_SIZE, DMA_TO_DEVICE);

	msm_hs_resource_unvote(msm_uport);
	/* Drop any extra vote left behind so the count returns to zero */
	rc = atomic_read(&msm_uport->resource_count);
	if (rc) {
		atomic_set(&msm_uport->resource_count, 1);
		MSM_HS_WARN("%s(): removing extra vote\n", __func__);
		msm_hs_resource_unvote(msm_uport);
	}
	/* Reset client-side vote bookkeeping if the client left it unbalanced */
	if (atomic_read(&msm_uport->client_req_state)) {
		MSM_HS_WARN("%s: Client clock vote imbalance\n", __func__);
		atomic_set(&msm_uport->client_req_state, 0);
	}
	if (atomic_read(&msm_uport->client_count)) {
		MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
		atomic_set(&msm_uport->client_count, 0);
		LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
			"%s: Client_Count 0\n", __func__);
	}
	msm_hs_unconfig_uart_gpios(uport);
	MSM_HS_INFO("%s:UART port closed successfully\n", __func__);
}
/*
 * Module exit: tear down in reverse order of msm_serial_hs_init() —
 * debugfs tree, platform driver, then the uart core driver.
 */
static void __exit msm_serial_hs_exit(void)
{
	pr_info("msm_serial_hs module removed\n");
	debugfs_remove_recursive(debug_base);
	platform_driver_unregister(&msm_serial_hs_platform_driver);
	uart_unregister_driver(&msm_hs_driver);
}
/*
 * Power-management callbacks: runtime PM suspend/resume plus noirq
 * system sleep hooks.  runtime_idle is deliberately NULL (default
 * idle behavior).
 */
static const struct dev_pm_ops msm_hs_dev_pm_ops = {
	.runtime_suspend = msm_hs_runtime_suspend,
	.runtime_resume = msm_hs_runtime_resume,
	.runtime_idle = NULL,
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(msm_hs_pm_sys_suspend_noirq,
			msm_hs_pm_sys_resume_noirq)
};
/*
 * Platform driver binding: matched by name or by the DT table in
 * msm_hs_match_table; PM ops defined above.
 */
static struct platform_driver msm_serial_hs_platform_driver = {
	.probe = msm_hs_probe,
	.remove = msm_hs_remove,
	.driver = {
		.name = "msm_serial_hs",
		.pm = &msm_hs_dev_pm_ops,
		.of_match_table = msm_hs_match_table,
	},
};
  3295. static struct uart_driver msm_hs_driver = {
  3296. .owner = THIS_MODULE,
  3297. .driver_name = "msm_serial_hs",
  3298. .dev_name = "ttyHS",
  3299. .nr = UARTDM_NR,
  3300. .cons = 0,
  3301. };
/*
 * serial core operations table.  The *_locked entries are called by
 * the core with the port lock held.  flush_buffer is intentionally
 * unimplemented (NULL).
 */
static const struct uart_ops msm_hs_ops = {
	.tx_empty = msm_hs_tx_empty,
	.set_mctrl = msm_hs_set_mctrl_locked,
	.get_mctrl = msm_hs_get_mctrl_locked,
	.stop_tx = msm_hs_stop_tx_locked,
	.start_tx = msm_hs_start_tx_locked,
	.stop_rx = msm_hs_stop_rx_locked,
	.enable_ms = msm_hs_enable_ms_locked,
	.break_ctl = msm_hs_break_ctl,
	.startup = msm_hs_startup,
	.shutdown = msm_hs_shutdown,
	.set_termios = msm_hs_set_termios,
	.type = msm_hs_type,
	.config_port = msm_hs_config_port,
	.flush_buffer = NULL,
	.ioctl = msm_hs_ioctl,
};
/* Module entry/exit hooks and metadata */
module_init(msm_serial_hs_init);
module_exit(msm_serial_hs_exit);
MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");