cpr-regulator.c

/*
 * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/cpr-regulator.h>
#include <soc/qcom/scm.h>

/* Register Offsets for RB-CPR and Bit Definitions */

/* RBCPR Version Register */
#define REG_RBCPR_VERSION		0
#define RBCPR_VER_2			0x02

/* RBCPR Gate Count and Target Registers */
#define REG_RBCPR_GCNT_TARGET(n)	(0x60 + 4 * n)
#define RBCPR_GCNT_TARGET_GCNT_BITS	10
#define RBCPR_GCNT_TARGET_GCNT_SHIFT	12
#define RBCPR_GCNT_TARGET_GCNT_MASK	((1<<RBCPR_GCNT_TARGET_GCNT_BITS)-1)

/* RBCPR Sensor Mask and Bypass Registers */
#define REG_RBCPR_SENSOR_MASK0		0x20
#define RBCPR_SENSOR_MASK0_SENSOR(n)	(~BIT(n))
#define REG_RBCPR_SENSOR_BYPASS0	0x30

/* RBCPR Timer Control */
#define REG_RBCPR_TIMER_INTERVAL	0x44
#define REG_RBIF_TIMER_ADJUST		0x4C

#define RBIF_TIMER_ADJ_CONS_UP_BITS	4
#define RBIF_TIMER_ADJ_CONS_UP_MASK	((1<<RBIF_TIMER_ADJ_CONS_UP_BITS)-1)
#define RBIF_TIMER_ADJ_CONS_DOWN_BITS	4
#define RBIF_TIMER_ADJ_CONS_DOWN_MASK	((1<<RBIF_TIMER_ADJ_CONS_DOWN_BITS)-1)
#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT	4
#define RBIF_TIMER_ADJ_CLAMP_INT_BITS	8
#define RBIF_TIMER_ADJ_CLAMP_INT_MASK	((1<<RBIF_TIMER_ADJ_CLAMP_INT_BITS)-1)
#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT	8

/* RBCPR Config Register */
#define REG_RBIF_LIMIT			0x48
#define REG_RBCPR_STEP_QUOT		0x80
#define REG_RBIF_SW_VLEVEL		0x94

#define RBIF_LIMIT_CEILING_BITS		6
#define RBIF_LIMIT_CEILING_MASK		((1<<RBIF_LIMIT_CEILING_BITS)-1)
#define RBIF_LIMIT_CEILING_SHIFT	6
#define RBIF_LIMIT_FLOOR_BITS		6
#define RBIF_LIMIT_FLOOR_MASK		((1<<RBIF_LIMIT_FLOOR_BITS)-1)

#define RBIF_LIMIT_CEILING_DEFAULT	RBIF_LIMIT_CEILING_MASK
#define RBIF_LIMIT_FLOOR_DEFAULT	0
#define RBIF_SW_VLEVEL_DEFAULT		0x20

#define RBCPR_STEP_QUOT_STEPQUOT_BITS	8
#define RBCPR_STEP_QUOT_STEPQUOT_MASK	((1<<RBCPR_STEP_QUOT_STEPQUOT_BITS)-1)
#define RBCPR_STEP_QUOT_IDLE_CLK_BITS	4
#define RBCPR_STEP_QUOT_IDLE_CLK_MASK	((1<<RBCPR_STEP_QUOT_IDLE_CLK_BITS)-1)
#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT	8

/* RBCPR Control Register */
#define REG_RBCPR_CTL			0x90
#define RBCPR_CTL_LOOP_EN		BIT(0)
#define RBCPR_CTL_TIMER_EN		BIT(3)
#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN		BIT(5)
#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN	BIT(6)
#define RBCPR_CTL_COUNT_MODE			BIT(10)
#define RBCPR_CTL_UP_THRESHOLD_BITS	4
#define RBCPR_CTL_UP_THRESHOLD_MASK	((1<<RBCPR_CTL_UP_THRESHOLD_BITS)-1)
#define RBCPR_CTL_UP_THRESHOLD_SHIFT	24
#define RBCPR_CTL_DN_THRESHOLD_BITS	4
#define RBCPR_CTL_DN_THRESHOLD_MASK	((1<<RBCPR_CTL_DN_THRESHOLD_BITS)-1)
#define RBCPR_CTL_DN_THRESHOLD_SHIFT	28

/* RBCPR Ack/Nack Response */
#define REG_RBIF_CONT_ACK_CMD		0x98
#define REG_RBIF_CONT_NACK_CMD		0x9C

/* RBCPR Result status Registers */
#define REG_RBCPR_RESULT_0		0xA0
#define REG_RBCPR_RESULT_1		0xA4

#define RBCPR_RESULT_1_SEL_FAST_BITS	3
#define RBCPR_RESULT_1_SEL_FAST(val)	(val & \
					((1<<RBCPR_RESULT_1_SEL_FAST_BITS) - 1))

#define RBCPR_RESULT0_BUSY_SHIFT	19
#define RBCPR_RESULT0_BUSY_MASK		BIT(RBCPR_RESULT0_BUSY_SHIFT)
#define RBCPR_RESULT0_ERROR_LT0_SHIFT	18
#define RBCPR_RESULT0_ERROR_SHIFT	6
#define RBCPR_RESULT0_ERROR_BITS	12
#define RBCPR_RESULT0_ERROR_MASK	((1<<RBCPR_RESULT0_ERROR_BITS)-1)
#define RBCPR_RESULT0_ERROR_STEPS_SHIFT	2
#define RBCPR_RESULT0_ERROR_STEPS_BITS	4
#define RBCPR_RESULT0_ERROR_STEPS_MASK	((1<<RBCPR_RESULT0_ERROR_STEPS_BITS)-1)
#define RBCPR_RESULT0_STEP_UP_SHIFT	1

/* RBCPR Interrupt Control Register */
#define REG_RBIF_IRQ_EN(n)		(0x100 + 4 * n)
#define REG_RBIF_IRQ_CLEAR		0x110
#define REG_RBIF_IRQ_STATUS		0x114

#define CPR_INT_DONE		BIT(0)
#define CPR_INT_MIN		BIT(1)
#define CPR_INT_DOWN		BIT(2)
#define CPR_INT_MID		BIT(3)
#define CPR_INT_UP		BIT(4)
#define CPR_INT_MAX		BIT(5)
#define CPR_INT_CLAMP		BIT(6)
#define CPR_INT_ALL	(CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
			CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
#define CPR_INT_DEFAULT	(CPR_INT_UP | CPR_INT_DOWN)

#define CPR_NUM_RING_OSC	8

/* RBCPR Debug Register */
#define REG_RBCPR_DEBUG1		0x120
#define RBCPR_DEBUG1_QUOT_FAST_BITS	12
#define RBCPR_DEBUG1_QUOT_SLOW_BITS	12
#define RBCPR_DEBUG1_QUOT_SLOW_SHIFT	12
#define RBCPR_DEBUG1_QUOT_FAST(val)	(val & \
					((1<<RBCPR_DEBUG1_QUOT_FAST_BITS)-1))
#define RBCPR_DEBUG1_QUOT_SLOW(val)	((val>>RBCPR_DEBUG1_QUOT_SLOW_SHIFT) & \
					((1<<RBCPR_DEBUG1_QUOT_SLOW_BITS)-1))

/* RBCPR Aging Register */
#define REG_RBCPR_HTOL_AGE		0x160
#define RBCPR_HTOL_AGE_PAGE		BIT(1)
#define RBCPR_AGE_DATA_STATUS		BIT(2)

/* RBCPR Clock Control Register */
#define RBCPR_CLK_SEL_MASK	BIT(0)
#define RBCPR_CLK_SEL_19P2_MHZ	0
#define RBCPR_CLK_SEL_AHB_CLK	BIT(0)

/* CPR eFuse parameters */
#define CPR_FUSE_TARGET_QUOT_BITS	12
#define CPR_FUSE_TARGET_QUOT_BITS_MASK	((1<<CPR_FUSE_TARGET_QUOT_BITS)-1)
#define CPR_FUSE_RO_SEL_BITS		3
#define CPR_FUSE_RO_SEL_BITS_MASK	((1<<CPR_FUSE_RO_SEL_BITS)-1)

#define CPR_FUSE_MIN_QUOT_DIFF		50

#define BYTES_PER_FUSE_ROW		8

#define SPEED_BIN_NONE			UINT_MAX

#define FUSE_REVISION_UNKNOWN		(-1)
#define FUSE_MAP_NO_MATCH		(-1)
#define FUSE_PARAM_MATCH_ANY		0xFFFFFFFF

#define FLAGS_IGNORE_1ST_IRQ_STATUS	BIT(0)
#define FLAGS_SET_MIN_VOLTAGE		BIT(1)
#define FLAGS_UPLIFT_QUOT_VOLT		BIT(2)

/*
 * The number of individual aging measurements to perform which are then
 * averaged together in order to determine the final aging adjustment value.
 */
#define CPR_AGING_MEASUREMENT_ITERATIONS	16

/*
 * Aging measurements for the aged and unaged ring oscillators take place a few
 * microseconds apart. If the vdd-supply voltage fluctuates between the two
 * measurements, then the difference between them will be incorrect. The
 * difference could end up too high or too low. This constant defines the
 * number of lowest and highest measurements to ignore when averaging.
 */
#define CPR_AGING_MEASUREMENT_FILTER		3
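
/*
 * Added note: with CPR_AGING_MEASUREMENT_ITERATIONS = 16 and
 * CPR_AGING_MEASUREMENT_FILTER = 3, the 3 lowest and 3 highest of the 16
 * sorted quotient-delta measurements are discarded and the remaining 10 are
 * averaged.
 */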

#define CPR_REGULATOR_DRIVER_NAME	"qcom,cpr-regulator"

/**
 * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
 * %VDD_MX_VMIN_APC:			Equal to APC voltage
 * %VDD_MX_VMIN_APC_CORNER_CEILING:	Equal to PVS corner ceiling voltage
 * %VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
 *					Equal to slow speed corner ceiling
 * %VDD_MX_VMIN_MX_VMAX:		Equal to specified vdd-mx-vmax voltage
 * %VDD_MX_VMIN_APC_CORNER_MAP:		Equal to the APC corner mapped MX
 *					voltage
 */
enum vdd_mx_vmin_method {
	VDD_MX_VMIN_APC,
	VDD_MX_VMIN_APC_CORNER_CEILING,
	VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
	VDD_MX_VMIN_MX_VMAX,
	VDD_MX_VMIN_APC_FUSE_CORNER_MAP,
	VDD_MX_VMIN_APC_CORNER_MAP,
};

#define CPR_CORNER_MIN		1
#define CPR_FUSE_CORNER_MIN	1
/*
 * This is an arbitrary upper limit which is used in a sanity check in order to
 * avoid excessive memory allocation due to bad device tree data.
 */
#define CPR_FUSE_CORNER_LIMIT	100

struct quot_adjust_info {
	int speed_bin;
	int virtual_corner;
	int quot_adjust;
};

struct cpr_quot_scale {
	u32 offset;
	u32 multiplier;
};

struct cpr_aging_sensor_info {
	u32 sensor_id;
	int initial_quot_diff;
	int current_quot_diff;
};

struct cpr_aging_info {
	struct cpr_aging_sensor_info *sensor_info;
	int	num_aging_sensors;
	int	aging_corner;
	u32	aging_ro_kv;
	u32	*aging_derate;
	u32	aging_sensor_bypass;
	u32	max_aging_margin;
	u32	aging_ref_voltage;
	u32	cpr_ro_kv[CPR_NUM_RING_OSC];
	int	*voltage_adjust;
	bool	cpr_aging_error;
	bool	cpr_aging_done;
};

static const char * const vdd_apc_name[] =	{"vdd-apc-optional-prim",
						"vdd-apc-optional-sec",
						"vdd-apc"};

enum voltage_change_dir {
	NO_CHANGE,
	DOWN,
	UP,
};

struct cpr_regulator {
	struct list_head		list;
	struct regulator_desc		rdesc;
	struct regulator_dev		*rdev;
	bool				vreg_enabled;
	int				corner;
	int				ceiling_max;
	struct dentry			*debugfs;

	/* eFuse parameters */
	phys_addr_t	efuse_addr;
	void __iomem	*efuse_base;
	u64		*remapped_row;
	u32		remapped_row_base;
	int		num_remapped_rows;

	/* Process voltage parameters */
	u32		*pvs_corner_v;
	/* Process voltage variables */
	u32		pvs_bin;
	u32		speed_bin;
	u32		pvs_version;

	/* APC voltage regulator */
	struct regulator	*vdd_apc;

	/* Dependency parameters */
	struct regulator	*vdd_mx;
	int			vdd_mx_vmax;
	int			vdd_mx_vmin_method;
	int			vdd_mx_vmin;
	int			*vdd_mx_corner_map;

	struct regulator	*rpm_apc_vreg;
	int			*rpm_apc_corner_map;

	/* mem-acc regulator */
	struct regulator	*mem_acc_vreg;

	/* CPR parameters */
	u32		num_fuse_corners;
	u64		cpr_fuse_bits;
	bool		cpr_fuse_disable;
	bool		cpr_fuse_local;
	bool		cpr_fuse_redundant;
	int		cpr_fuse_revision;
	int		cpr_fuse_map_count;
	int		cpr_fuse_map_match;
	int		*cpr_fuse_target_quot;
	int		*cpr_fuse_ro_sel;
	int		*fuse_quot_offset;
	int		gcnt;

	unsigned int	cpr_irq;
	void __iomem	*rbcpr_base;
	phys_addr_t	rbcpr_clk_addr;
	struct mutex	cpr_mutex;

	int		*cpr_max_ceiling;
	int		*ceiling_volt;
	int		*floor_volt;
	int		*fuse_ceiling_volt;
	int		*fuse_floor_volt;
	int		*last_volt;
	int		*open_loop_volt;
	int		step_volt;

	int		*save_ctl;
	int		*save_irq;

	int		*vsens_corner_map;
	/* vsens status */
	bool		vsens_enabled;
	/* vsens regulators */
	struct regulator	*vdd_vsens_corner;
	struct regulator	*vdd_vsens_voltage;

	/* Config parameters */
	bool		enable;
	u32		ref_clk_khz;
	u32		timer_delay_us;
	u32		timer_cons_up;
	u32		timer_cons_down;
	u32		irq_line;
	u32		*step_quotient;
	u32		up_threshold;
	u32		down_threshold;
	u32		idle_clocks;
	u32		gcnt_time_us;
	u32		clamp_timer_interval;
	u32		vdd_apc_step_up_limit;
	u32		vdd_apc_step_down_limit;
	u32		flags;
	int		*corner_map;
	u32		num_corners;
	int		*quot_adjust;
	int		*mem_acc_corner_map;

	int			num_adj_cpus;
	int			online_cpus;
	int			*adj_cpus;
	int			**adj_cpus_save_ctl;
	int			**adj_cpus_save_irq;
	int			**adj_cpus_last_volt;
	int			**adj_cpus_quot_adjust;
	int			**adj_cpus_open_loop_volt;
	bool			adj_cpus_open_loop_volt_as_ceiling;
	struct notifier_block	cpu_notifier;
	cpumask_t		cpu_mask;
	bool			cpr_disabled_in_pc;
	struct notifier_block	pm_notifier;

	bool		is_cpr_suspended;
	bool		skip_voltage_change_during_suspend;

	struct cpr_aging_info	*aging_info;

	struct notifier_block	panic_notifier;
};

#define CPR_DEBUG_MASK_IRQ	BIT(0)
#define CPR_DEBUG_MASK_API	BIT(1)

static int cpr_debug_enable;
#if defined(CONFIG_DEBUG_FS)
static struct dentry *cpr_debugfs_base;
#endif

static DEFINE_MUTEX(cpr_regulator_list_mutex);
static LIST_HEAD(cpr_regulator_list);

module_param_named(debug_enable, cpr_debug_enable, int, S_IRUGO | S_IWUSR);

#define cpr_debug(cpr_vreg, message, ...) \
	do { \
		if (cpr_debug_enable & CPR_DEBUG_MASK_API) \
			pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
				##__VA_ARGS__); \
	} while (0)
#define cpr_debug_irq(cpr_vreg, message, ...) \
	do { \
		if (cpr_debug_enable & CPR_DEBUG_MASK_IRQ) \
			pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
				##__VA_ARGS__); \
		else \
			pr_debug("%s: " message, (cpr_vreg)->rdesc.name, \
				##__VA_ARGS__); \
	} while (0)
#define cpr_info(cpr_vreg, message, ...) \
	pr_info("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
#define cpr_err(cpr_vreg, message, ...) \
	pr_err("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)

static u64 cpr_read_remapped_efuse_row(struct cpr_regulator *cpr_vreg,
					u32 row_num)
{
	if (row_num - cpr_vreg->remapped_row_base
			>= cpr_vreg->num_remapped_rows) {
		cpr_err(cpr_vreg, "invalid row=%u, max remapped row=%u\n",
			row_num, cpr_vreg->remapped_row_base
					+ cpr_vreg->num_remapped_rows - 1);
		return 0;
	}

	return cpr_vreg->remapped_row[row_num - cpr_vreg->remapped_row_base];
}

static u64 cpr_read_efuse_row(struct cpr_regulator *cpr_vreg, u32 row_num,
				bool use_tz_api)
{
	int rc;
	u64 efuse_bits;
	struct scm_desc desc = {0};
	struct cpr_read_req {
		u32 row_address;
		int addr_type;
	} req;

	struct cpr_read_rsp {
		u32 row_data[2];
		u32 status;
	} rsp;

	if (cpr_vreg->remapped_row && row_num >= cpr_vreg->remapped_row_base)
		return cpr_read_remapped_efuse_row(cpr_vreg, row_num);

	if (!use_tz_api) {
		efuse_bits = readq_relaxed(cpr_vreg->efuse_base
			+ row_num * BYTES_PER_FUSE_ROW);
		return efuse_bits;
	}

	desc.args[0] = req.row_address = cpr_vreg->efuse_addr +
					row_num * BYTES_PER_FUSE_ROW;
	desc.args[1] = req.addr_type = 0;
	desc.arginfo = SCM_ARGS(2);
	efuse_bits = 0;

	if (!is_scm_armv8()) {
		rc = scm_call(SCM_SVC_FUSE, SCM_FUSE_READ,
				&req, sizeof(req), &rsp, sizeof(rsp));
	} else {
		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE, SCM_FUSE_READ),
				&desc);
		rsp.row_data[0] = desc.ret[0];
		rsp.row_data[1] = desc.ret[1];
		rsp.status = desc.ret[2];
	}

	if (rc) {
		cpr_err(cpr_vreg, "read row %d failed, err code = %d",
			row_num, rc);
	} else {
		efuse_bits = ((u64)(rsp.row_data[1]) << 32) +
				(u64)rsp.row_data[0];
	}

	return efuse_bits;
}

/**
 * cpr_read_efuse_param() - read a parameter from one or two eFuse rows
 * @cpr_vreg:	Pointer to cpr_regulator struct for this regulator.
 * @row_start:	Fuse row number to start reading from.
 * @bit_start:	The LSB of the parameter to read from the fuse.
 * @bit_len:	The length of the parameter in bits.
 * @use_tz_api:	Flag to indicate if an SCM call should be used to read the
 *		fuse.
 *
 * This function reads a parameter of specified offset and bit size out of one
 * or two consecutive eFuse rows. This allows for the reading of parameters
 * that happen to be split between two eFuse rows.
 *
 * Returns the fuse parameter on success or 0 on failure.
 */
static u64 cpr_read_efuse_param(struct cpr_regulator *cpr_vreg, int row_start,
		int bit_start, int bit_len, bool use_tz_api)
{
	u64 fuse[2];
	u64 param = 0;
	int bits_first, bits_second;

	if (bit_start < 0) {
		cpr_err(cpr_vreg, "Invalid LSB = %d specified\n", bit_start);
		return 0;
	}

	if (bit_len < 0 || bit_len > 64) {
		cpr_err(cpr_vreg, "Invalid bit length = %d specified\n",
			bit_len);
		return 0;
	}

	/* Allow bit indexing to start beyond the end of the start row. */
	if (bit_start >= 64) {
		row_start += bit_start >> 6; /* equivalent to bit_start / 64 */
		bit_start &= 0x3F;
	}

	fuse[0] = cpr_read_efuse_row(cpr_vreg, row_start, use_tz_api);

	if (bit_start == 0 && bit_len == 64) {
		param = fuse[0];
	} else if (bit_start + bit_len <= 64) {
		param = (fuse[0] >> bit_start) & ((1ULL << bit_len) - 1);
	} else {
		fuse[1] = cpr_read_efuse_row(cpr_vreg, row_start + 1,
						use_tz_api);
		bits_first = 64 - bit_start;
		bits_second = bit_len - bits_first;
		param = (fuse[0] >> bit_start) & ((1ULL << bits_first) - 1);
		param |= (fuse[1] & ((1ULL << bits_second) - 1)) << bits_first;
	}

	return param;
}
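
/*
 * Illustrative example (added, not part of the original source): reading a
 * 12-bit parameter with bit_start = 58 spans two rows. bits_first = 64 - 58
 * = 6, so the low 6 bits of the parameter come from bits [63:58] of
 * row_start and the remaining bits_second = 6 bits come from bits [5:0] of
 * row_start + 1.
 */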

static bool cpr_is_allowed(struct cpr_regulator *cpr_vreg)
{
	if (cpr_vreg->cpr_fuse_disable || !cpr_vreg->enable)
		return false;
	else
		return true;
}

static void cpr_write(struct cpr_regulator *cpr_vreg, u32 offset, u32 value)
{
	writel_relaxed(value, cpr_vreg->rbcpr_base + offset);
}

static u32 cpr_read(struct cpr_regulator *cpr_vreg, u32 offset)
{
	return readl_relaxed(cpr_vreg->rbcpr_base + offset);
}

static void cpr_masked_write(struct cpr_regulator *cpr_vreg, u32 offset,
			     u32 mask, u32 value)
{
	u32 reg_val;

	reg_val = readl_relaxed(cpr_vreg->rbcpr_base + offset);
	reg_val &= ~mask;
	reg_val |= value & mask;
	writel_relaxed(reg_val, cpr_vreg->rbcpr_base + offset);
}

static void cpr_irq_clr(struct cpr_regulator *cpr_vreg)
{
	cpr_write(cpr_vreg, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
}

static void cpr_irq_clr_nack(struct cpr_regulator *cpr_vreg)
{
	cpr_irq_clr(cpr_vreg);
	cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
}

static void cpr_irq_clr_ack(struct cpr_regulator *cpr_vreg)
{
	cpr_irq_clr(cpr_vreg);
	cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
}

static void cpr_irq_set(struct cpr_regulator *cpr_vreg, u32 int_bits)
{
	cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), int_bits);
}

static void cpr_ctl_modify(struct cpr_regulator *cpr_vreg, u32 mask, u32 value)
{
	cpr_masked_write(cpr_vreg, REG_RBCPR_CTL, mask, value);
}

static void cpr_ctl_enable(struct cpr_regulator *cpr_vreg, int corner)
{
	u32 val;

	if (cpr_vreg->is_cpr_suspended)
		return;

	/* Program Consecutive Up & Down */
	val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
			<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
		(cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK);
	cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
			RBIF_TIMER_ADJ_CONS_UP_MASK |
			RBIF_TIMER_ADJ_CONS_DOWN_MASK, val);
	cpr_masked_write(cpr_vreg, REG_RBCPR_CTL,
			RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
			RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
			cpr_vreg->save_ctl[corner]);
	cpr_irq_set(cpr_vreg, cpr_vreg->save_irq[corner]);

	if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled &&
	    (cpr_vreg->ceiling_volt[corner] >
		cpr_vreg->floor_volt[corner]))
		val = RBCPR_CTL_LOOP_EN;
	else
		val = 0;
	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, val);
}

static void cpr_ctl_disable(struct cpr_regulator *cpr_vreg)
{
	if (cpr_vreg->is_cpr_suspended)
		return;

	cpr_irq_set(cpr_vreg, 0);
	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
			RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
	cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
			RBIF_TIMER_ADJ_CONS_UP_MASK |
			RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
	cpr_irq_clr(cpr_vreg);
	cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
	cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0);
}

static bool cpr_ctl_is_enabled(struct cpr_regulator *cpr_vreg)
{
	u32 reg_val;

	reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
	return reg_val & RBCPR_CTL_LOOP_EN;
}

static bool cpr_ctl_is_busy(struct cpr_regulator *cpr_vreg)
{
	u32 reg_val;

	reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
	return reg_val & RBCPR_RESULT0_BUSY_MASK;
}

static void cpr_corner_save(struct cpr_regulator *cpr_vreg, int corner)
{
	cpr_vreg->save_ctl[corner] = cpr_read(cpr_vreg, REG_RBCPR_CTL);
	cpr_vreg->save_irq[corner] =
		cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
}

static void cpr_corner_restore(struct cpr_regulator *cpr_vreg, int corner)
{
	u32 gcnt, ctl, irq, ro_sel, step_quot;
	int fuse_corner = cpr_vreg->corner_map[corner];
	int i;

	ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
	gcnt = cpr_vreg->gcnt | (cpr_vreg->cpr_fuse_target_quot[fuse_corner] -
					cpr_vreg->quot_adjust[corner]);

	/* Program the step quotient and idle clocks */
	step_quot = ((cpr_vreg->idle_clocks & RBCPR_STEP_QUOT_IDLE_CLK_MASK)
			<< RBCPR_STEP_QUOT_IDLE_CLK_SHIFT) |
		(cpr_vreg->step_quotient[fuse_corner]
			& RBCPR_STEP_QUOT_STEPQUOT_MASK);
	cpr_write(cpr_vreg, REG_RBCPR_STEP_QUOT, step_quot);

	/* Clear the target quotient value and gate count of all ROs */
	for (i = 0; i < CPR_NUM_RING_OSC; i++)
		cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);

	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
	ctl = cpr_vreg->save_ctl[corner];
	cpr_write(cpr_vreg, REG_RBCPR_CTL, ctl);
	irq = cpr_vreg->save_irq[corner];
	cpr_irq_set(cpr_vreg, irq);
	cpr_debug(cpr_vreg, "gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n",
		  gcnt, ctl, irq);
}

static void cpr_corner_switch(struct cpr_regulator *cpr_vreg, int corner)
{
	if (cpr_vreg->corner == corner)
		return;
	cpr_corner_restore(cpr_vreg, corner);
}

static int cpr_apc_set(struct cpr_regulator *cpr_vreg, u32 new_volt)
{
	int max_volt, rc;

	max_volt = cpr_vreg->ceiling_max;
	rc = regulator_set_voltage(cpr_vreg->vdd_apc, new_volt, max_volt);
	if (rc)
		cpr_err(cpr_vreg, "set: vdd_apc = %d uV: rc=%d\n",
			new_volt, rc);
	return rc;
}

static int cpr_mx_get(struct cpr_regulator *cpr_vreg, int corner, int apc_volt)
{
	int vdd_mx;
	int fuse_corner = cpr_vreg->corner_map[corner];
	int highest_fuse_corner = cpr_vreg->num_fuse_corners;

	switch (cpr_vreg->vdd_mx_vmin_method) {
	case VDD_MX_VMIN_APC:
		vdd_mx = apc_volt;
		break;
	case VDD_MX_VMIN_APC_CORNER_CEILING:
		vdd_mx = cpr_vreg->fuse_ceiling_volt[fuse_corner];
		break;
	case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
		vdd_mx = cpr_vreg->fuse_ceiling_volt[highest_fuse_corner];
		break;
	case VDD_MX_VMIN_MX_VMAX:
		vdd_mx = cpr_vreg->vdd_mx_vmax;
		break;
	case VDD_MX_VMIN_APC_FUSE_CORNER_MAP:
		vdd_mx = cpr_vreg->vdd_mx_corner_map[fuse_corner];
		break;
	case VDD_MX_VMIN_APC_CORNER_MAP:
		vdd_mx = cpr_vreg->vdd_mx_corner_map[corner];
		break;
	default:
		vdd_mx = 0;
		break;
	}

	return vdd_mx;
}

static int cpr_mx_set(struct cpr_regulator *cpr_vreg, int corner,
		      int vdd_mx_vmin)
{
	int rc;
	int fuse_corner = cpr_vreg->corner_map[corner];

	rc = regulator_set_voltage(cpr_vreg->vdd_mx, vdd_mx_vmin,
				   cpr_vreg->vdd_mx_vmax);
	cpr_debug(cpr_vreg, "[corner:%d, fuse_corner:%d] %d uV\n", corner,
			fuse_corner, vdd_mx_vmin);

	if (!rc) {
		cpr_vreg->vdd_mx_vmin = vdd_mx_vmin;
	} else {
		cpr_err(cpr_vreg, "set: vdd_mx [corner:%d, fuse_corner:%d] = %d uV failed: rc=%d\n",
			corner, fuse_corner, vdd_mx_vmin, rc);
	}
	return rc;
}
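
/*
 * Added commentary: cpr_scale_voltage() below sequences the dependent rails
 * around the vdd-apc change. For a DOWN change, the mem-acc and RPM APC
 * corner votes are updated before vdd-apc is lowered and vdd-mx is lowered
 * afterwards; for an UP change, vdd-mx is raised first and the mem-acc/RPM
 * APC votes are updated only after vdd-apc has been raised.
 */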
  663. static int cpr_scale_voltage(struct cpr_regulator *cpr_vreg, int corner,
  664. int new_apc_volt, enum voltage_change_dir dir)
  665. {
  666. int rc = 0, vdd_mx_vmin = 0;
  667. int mem_acc_corner = cpr_vreg->mem_acc_corner_map[corner];
  668. int fuse_corner = cpr_vreg->corner_map[corner];
  669. int apc_corner, vsens_corner;
  670. /* Determine the vdd_mx voltage */
  671. if (dir != NO_CHANGE && cpr_vreg->vdd_mx != NULL)
  672. vdd_mx_vmin = cpr_mx_get(cpr_vreg, corner, new_apc_volt);
  673. if (cpr_vreg->vdd_vsens_voltage && cpr_vreg->vsens_enabled) {
  674. rc = regulator_disable(cpr_vreg->vdd_vsens_voltage);
  675. if (!rc)
  676. cpr_vreg->vsens_enabled = false;
  677. }
  678. if (dir == DOWN) {
  679. if (!rc && cpr_vreg->mem_acc_vreg)
  680. rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
  681. mem_acc_corner, mem_acc_corner);
  682. if (!rc && cpr_vreg->rpm_apc_vreg) {
  683. apc_corner = cpr_vreg->rpm_apc_corner_map[corner];
  684. rc = regulator_set_voltage(cpr_vreg->rpm_apc_vreg,
  685. apc_corner, apc_corner);
  686. if (rc)
  687. cpr_err(cpr_vreg, "apc_corner voting failed rc=%d\n",
  688. rc);
  689. }
  690. }
  691. if (!rc && vdd_mx_vmin && dir == UP) {
  692. if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
  693. rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
  694. }
  695. if (!rc)
  696. rc = cpr_apc_set(cpr_vreg, new_apc_volt);
  697. if (dir == UP) {
  698. if (!rc && cpr_vreg->mem_acc_vreg)
  699. rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
  700. mem_acc_corner, mem_acc_corner);
  701. if (!rc && cpr_vreg->rpm_apc_vreg) {
  702. apc_corner = cpr_vreg->rpm_apc_corner_map[corner];
  703. rc = regulator_set_voltage(cpr_vreg->rpm_apc_vreg,
  704. apc_corner, apc_corner);
  705. if (rc)
  706. cpr_err(cpr_vreg, "apc_corner voting failed rc=%d\n",
  707. rc);
  708. }
  709. }
  710. if (!rc && vdd_mx_vmin && dir == DOWN) {
  711. if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
  712. rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
  713. }
  714. if (!rc && cpr_vreg->vdd_vsens_corner) {
  715. vsens_corner = cpr_vreg->vsens_corner_map[fuse_corner];
  716. rc = regulator_set_voltage(cpr_vreg->vdd_vsens_corner,
  717. vsens_corner, vsens_corner);
  718. }
  719. if (!rc && cpr_vreg->vdd_vsens_voltage) {
  720. rc = regulator_set_voltage(cpr_vreg->vdd_vsens_voltage,
  721. cpr_vreg->floor_volt[corner],
  722. cpr_vreg->ceiling_volt[corner]);
  723. if (!rc && !cpr_vreg->vsens_enabled) {
  724. rc = regulator_enable(cpr_vreg->vdd_vsens_voltage);
  725. if (!rc)
  726. cpr_vreg->vsens_enabled = true;
  727. }
  728. }
  729. return rc;
  730. }
  731. static void cpr_scale(struct cpr_regulator *cpr_vreg,
  732. enum voltage_change_dir dir)
  733. {
  734. u32 reg_val, error_steps, reg_mask;
  735. int last_volt, new_volt, corner, fuse_corner;
  736. u32 gcnt, quot;
  737. corner = cpr_vreg->corner;
  738. fuse_corner = cpr_vreg->corner_map[corner];
  739. reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
  740. error_steps = (reg_val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
  741. & RBCPR_RESULT0_ERROR_STEPS_MASK;
  742. last_volt = cpr_vreg->last_volt[corner];
  743. cpr_debug_irq(cpr_vreg,
  744. "last_volt[corner:%d, fuse_corner:%d] = %d uV\n",
  745. corner, fuse_corner, last_volt);
  746. gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET
  747. (cpr_vreg->cpr_fuse_ro_sel[fuse_corner]));
  748. quot = gcnt & ((1 << RBCPR_GCNT_TARGET_GCNT_SHIFT) - 1);
  749. if (dir == UP) {
  750. if (cpr_vreg->clamp_timer_interval
  751. && error_steps < cpr_vreg->up_threshold) {
  752. /*
  753. * Handle the case where another measurement started
  754. * after the interrupt was triggered due to a core
  755. * exiting from power collapse.
  756. */
  757. error_steps = max(cpr_vreg->up_threshold,
  758. cpr_vreg->vdd_apc_step_up_limit);
  759. }
  760. cpr_debug_irq(cpr_vreg,
  761. "Up: cpr status = 0x%08x (error_steps=%d)\n",
  762. reg_val, error_steps);
  763. if (last_volt >= cpr_vreg->ceiling_volt[corner]) {
  764. cpr_debug_irq(cpr_vreg,
  765. "[corn:%d, fuse_corn:%d] @ ceiling: %d >= %d: NACK\n",
  766. corner, fuse_corner, last_volt,
  767. cpr_vreg->ceiling_volt[corner]);
  768. cpr_irq_clr_nack(cpr_vreg);
  769. cpr_debug_irq(cpr_vreg, "gcnt = 0x%08x (quot = %d)\n",
  770. gcnt, quot);
  771. /* Maximize the UP threshold */
  772. reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
  773. RBCPR_CTL_UP_THRESHOLD_SHIFT;
  774. reg_val = reg_mask;
  775. cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
  776. /* Disable UP interrupt */
  777. cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_UP);
  778. return;
  779. }
  780. if (error_steps > cpr_vreg->vdd_apc_step_up_limit) {
  781. cpr_debug_irq(cpr_vreg,
  782. "%d is over up-limit(%d): Clamp\n",
  783. error_steps,
  784. cpr_vreg->vdd_apc_step_up_limit);
  785. error_steps = cpr_vreg->vdd_apc_step_up_limit;
  786. }
  787. /* Calculate new voltage */
  788. new_volt = last_volt + (error_steps * cpr_vreg->step_volt);
  789. if (new_volt > cpr_vreg->ceiling_volt[corner]) {
  790. cpr_debug_irq(cpr_vreg,
  791. "new_volt(%d) >= ceiling(%d): Clamp\n",
  792. new_volt,
  793. cpr_vreg->ceiling_volt[corner]);
  794. new_volt = cpr_vreg->ceiling_volt[corner];
  795. }
  796. if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
  797. cpr_irq_clr_nack(cpr_vreg);
  798. return;
  799. }
  800. cpr_vreg->last_volt[corner] = new_volt;
  801. /* Disable auto nack down */
  802. reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
  803. reg_val = 0;
  804. cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
  805. /* Re-enable default interrupts */
  806. cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
  807. /* Ack */
  808. cpr_irq_clr_ack(cpr_vreg);
  809. cpr_debug_irq(cpr_vreg,
  810. "UP: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
  811. corner, fuse_corner, new_volt);
  812. } else if (dir == DOWN) {
  813. if (cpr_vreg->clamp_timer_interval
  814. && error_steps < cpr_vreg->down_threshold) {
  815. /*
  816. * Handle the case where another measurement started
  817. * after the interrupt was triggered due to a core
  818. * exiting from power collapse.
  819. */
  820. error_steps = max(cpr_vreg->down_threshold,
  821. cpr_vreg->vdd_apc_step_down_limit);
  822. }
  823. cpr_debug_irq(cpr_vreg,
  824. "Down: cpr status = 0x%08x (error_steps=%d)\n",
  825. reg_val, error_steps);
  826. if (last_volt <= cpr_vreg->floor_volt[corner]) {
  827. cpr_debug_irq(cpr_vreg,
  828. "[corn:%d, fuse_corner:%d] @ floor: %d <= %d: NACK\n",
  829. corner, fuse_corner, last_volt,
  830. cpr_vreg->floor_volt[corner]);
  831. cpr_irq_clr_nack(cpr_vreg);
  832. cpr_debug_irq(cpr_vreg, "gcnt = 0x%08x (quot = %d)\n",
  833. gcnt, quot);
  834. /* Enable auto nack down */
  835. reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
  836. reg_val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
  837. cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
  838. /* Disable DOWN interrupt */
  839. cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
  840. return;
  841. }
  842. if (error_steps > cpr_vreg->vdd_apc_step_down_limit) {
  843. cpr_debug_irq(cpr_vreg,
  844. "%d is over down-limit(%d): Clamp\n",
  845. error_steps,
  846. cpr_vreg->vdd_apc_step_down_limit);
  847. error_steps = cpr_vreg->vdd_apc_step_down_limit;
  848. }
  849. /* Calculte new voltage */
  850. new_volt = last_volt - (error_steps * cpr_vreg->step_volt);
  851. if (new_volt < cpr_vreg->floor_volt[corner]) {
  852. cpr_debug_irq(cpr_vreg,
  853. "new_volt(%d) < floor(%d): Clamp\n",
  854. new_volt,
  855. cpr_vreg->floor_volt[corner]);
  856. new_volt = cpr_vreg->floor_volt[corner];
  857. }
  858. if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
  859. cpr_irq_clr_nack(cpr_vreg);
  860. return;
  861. }
  862. cpr_vreg->last_volt[corner] = new_volt;
  863. /* Restore default threshold for UP */
  864. reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
  865. RBCPR_CTL_UP_THRESHOLD_SHIFT;
  866. reg_val = cpr_vreg->up_threshold <<
  867. RBCPR_CTL_UP_THRESHOLD_SHIFT;
  868. cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
  869. /* Re-enable default interrupts */
  870. cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
  871. /* Ack */
  872. cpr_irq_clr_ack(cpr_vreg);
  873. cpr_debug_irq(cpr_vreg,
  874. "DOWN: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
  875. corner, fuse_corner, new_volt);
  876. }
  877. }
  878. static irqreturn_t cpr_irq_handler(int irq, void *dev)
  879. {
  880. struct cpr_regulator *cpr_vreg = dev;
  881. u32 reg_val;
  882. mutex_lock(&cpr_vreg->cpr_mutex);
  883. reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
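/*
 * FLAGS_IGNORE_1ST_IRQ_STATUS is set for controllers reporting
 * RBCPR_VERSION <= RBCPR_VER_2 (see cpr_config()); on those the first
 * status read is discarded and the register is read a second time,
 * presumably because the first read can return a stale value.
 */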
  884. if (cpr_vreg->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
  885. reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
  886. cpr_debug_irq(cpr_vreg, "IRQ_STATUS = 0x%02X\n", reg_val);
  887. if (!cpr_ctl_is_enabled(cpr_vreg)) {
  888. cpr_debug_irq(cpr_vreg, "CPR is disabled\n");
  889. goto _exit;
  890. } else if (cpr_ctl_is_busy(cpr_vreg)
  891. && !cpr_vreg->clamp_timer_interval) {
  892. cpr_debug_irq(cpr_vreg, "CPR measurement is not ready\n");
  893. goto _exit;
  894. } else if (!cpr_is_allowed(cpr_vreg)) {
  895. reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
  896. cpr_err(cpr_vreg, "Interrupt broken? RBCPR_CTL = 0x%02X\n",
  897. reg_val);
  898. goto _exit;
  899. }
900. /* The following checks are ordered according to each IRQ's priority */
  901. if (reg_val & CPR_INT_UP) {
  902. cpr_scale(cpr_vreg, UP);
  903. } else if (reg_val & CPR_INT_DOWN) {
  904. cpr_scale(cpr_vreg, DOWN);
  905. } else if (reg_val & CPR_INT_MIN) {
  906. cpr_irq_clr_nack(cpr_vreg);
  907. } else if (reg_val & CPR_INT_MAX) {
  908. cpr_irq_clr_nack(cpr_vreg);
  909. } else if (reg_val & CPR_INT_MID) {
  910. /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
  911. cpr_debug_irq(cpr_vreg, "IRQ occurred for Mid Flag\n");
  912. } else {
  913. cpr_debug_irq(cpr_vreg,
  914. "IRQ occurred for unknown flag (0x%08x)\n", reg_val);
  915. }
  916. /* Save register values for the corner */
  917. cpr_corner_save(cpr_vreg, cpr_vreg->corner);
  918. _exit:
  919. mutex_unlock(&cpr_vreg->cpr_mutex);
  920. return IRQ_HANDLED;
  921. }
  922. /**
923. * cmp_int() - integer comparison function to be passed to sort() in order
924. * to produce an ascending ordering
  925. * @a: First int value
  926. * @b: Second int value
  927. *
  928. * Return: >0 if a > b, 0 if a == b, <0 if a < b
  929. */
  930. static int cmp_int(const void *a, const void *b)
  931. {
  932. return *(int *)a - *(int *)b;
  933. }
  934. static int cpr_get_aging_quot_delta(struct cpr_regulator *cpr_vreg,
  935. struct cpr_aging_sensor_info *aging_sensor_info)
  936. {
  937. int quot_min, quot_max, is_aging_measurement, aging_measurement_count;
  938. int quot_min_scaled, quot_max_scaled, quot_delta_scaled_sum;
  939. int retries, rc = 0, sel_fast = 0, i, quot_delta_scaled;
  940. u32 val, gcnt_ref, gcnt;
  941. int *quot_delta_results, filtered_count;
  942. quot_delta_results = kcalloc(CPR_AGING_MEASUREMENT_ITERATIONS,
  943. sizeof(*quot_delta_results), GFP_ATOMIC);
  944. if (!quot_delta_results)
  945. return -ENOMEM;
  946. /* Clear the target quotient value and gate count of all ROs */
  947. for (i = 0; i < CPR_NUM_RING_OSC; i++)
  948. cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
  949. /* Program GCNT0/1 for getting aging data */
  950. gcnt_ref = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
  951. gcnt = gcnt_ref * 3 / 2;
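/*
 * The aging measurement uses a gate count 50% longer than the reference
 * gate count (gcnt = gcnt_ref * 3 / 2); the quotients measured below are
 * later rescaled by (gcnt_ref + 1) / (gcnt + 1) so that they remain
 * comparable with the fused values.
 */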
  952. val = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
  953. RBCPR_GCNT_TARGET_GCNT_SHIFT;
  954. cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(0), val);
  955. cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(1), val);
  956. val = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(0));
  957. cpr_debug(cpr_vreg, "RBCPR_GCNT_TARGET0 = 0x%08x\n", val);
  958. val = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(1));
  959. cpr_debug(cpr_vreg, "RBCPR_GCNT_TARGET1 = 0x%08x\n", val);
  960. /* Program TIMER_INTERVAL to zero */
  961. cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, 0);
  962. /* Bypass sensors in collapsible domain */
  963. if (cpr_vreg->aging_info->aging_sensor_bypass)
  964. cpr_write(cpr_vreg, REG_RBCPR_SENSOR_BYPASS0,
  965. (cpr_vreg->aging_info->aging_sensor_bypass &
  966. RBCPR_SENSOR_MASK0_SENSOR(aging_sensor_info->sensor_id)));
  967. /* Mask other sensors */
  968. cpr_write(cpr_vreg, REG_RBCPR_SENSOR_MASK0,
  969. RBCPR_SENSOR_MASK0_SENSOR(aging_sensor_info->sensor_id));
  970. val = cpr_read(cpr_vreg, REG_RBCPR_SENSOR_MASK0);
  971. cpr_debug(cpr_vreg, "RBCPR_SENSOR_MASK0 = 0x%08x\n", val);
  972. /* Enable cpr controller */
  973. cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, RBCPR_CTL_LOOP_EN);
  974. /* Make sure cpr starts measurement with toggling busy bit */
  975. mb();
  976. /* Wait and Ignore the first measurement. Time-out after 5ms */
  977. retries = 50;
  978. while (retries-- && cpr_ctl_is_busy(cpr_vreg))
  979. udelay(100);
  980. if (retries < 0) {
  981. cpr_err(cpr_vreg, "Aging calibration failed\n");
  982. rc = -EBUSY;
  983. goto _exit;
  984. }
  985. /* Set age page mode */
  986. cpr_write(cpr_vreg, REG_RBCPR_HTOL_AGE, RBCPR_HTOL_AGE_PAGE);
  987. aging_measurement_count = 0;
  988. quot_delta_scaled_sum = 0;
  989. for (i = 0; i < CPR_AGING_MEASUREMENT_ITERATIONS; i++) {
  990. /* Send cont nack */
  991. cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
  992. /*
  993. * Make sure cpr starts next measurement with
  994. * toggling busy bit
  995. */
  996. mb();
  997. /*
  998. * Wait for controller to finish measurement
  999. * and time-out after 5ms
  1000. */
  1001. retries = 50;
  1002. while (retries-- && cpr_ctl_is_busy(cpr_vreg))
  1003. udelay(100);
  1004. if (retries < 0) {
  1005. cpr_err(cpr_vreg, "Aging calibration failed\n");
  1006. rc = -EBUSY;
  1007. goto _exit;
  1008. }
  1009. /* Check for PAGE_IS_AGE flag in status register */
  1010. val = cpr_read(cpr_vreg, REG_RBCPR_HTOL_AGE);
  1011. is_aging_measurement = val & RBCPR_AGE_DATA_STATUS;
  1012. val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_1);
  1013. sel_fast = RBCPR_RESULT_1_SEL_FAST(val);
  1014. cpr_debug(cpr_vreg, "RBCPR_RESULT_1 = 0x%08x\n", val);
  1015. val = cpr_read(cpr_vreg, REG_RBCPR_DEBUG1);
  1016. cpr_debug(cpr_vreg, "RBCPR_DEBUG1 = 0x%08x\n", val);
  1017. if (sel_fast == 1) {
  1018. quot_min = RBCPR_DEBUG1_QUOT_FAST(val);
  1019. quot_max = RBCPR_DEBUG1_QUOT_SLOW(val);
  1020. } else {
  1021. quot_min = RBCPR_DEBUG1_QUOT_SLOW(val);
  1022. quot_max = RBCPR_DEBUG1_QUOT_FAST(val);
  1023. }
  1024. /*
  1025. * Scale the quotients so that they are equivalent to the fused
  1026. * values. This accounts for the difference in measurement
  1027. * interval times.
  1028. */
  1029. quot_min_scaled = quot_min * (gcnt_ref + 1) / (gcnt + 1);
  1030. quot_max_scaled = quot_max * (gcnt_ref + 1) / (gcnt + 1);
  1031. quot_delta_scaled = 0;
  1032. if (is_aging_measurement) {
  1033. quot_delta_scaled = quot_min_scaled - quot_max_scaled;
  1034. quot_delta_results[aging_measurement_count++] =
  1035. quot_delta_scaled;
  1036. }
  1037. cpr_debug(cpr_vreg,
  1038. "Age sensor[%d]: measurement[%d]: page_is_age=%u quot_min = %d, quot_max = %d quot_min_scaled = %d, quot_max_scaled = %d quot_delta_scaled = %d\n",
  1039. aging_sensor_info->sensor_id, i, is_aging_measurement,
  1040. quot_min, quot_max, quot_min_scaled, quot_max_scaled,
  1041. quot_delta_scaled);
  1042. }
  1043. filtered_count
  1044. = aging_measurement_count - CPR_AGING_MEASUREMENT_FILTER * 2;
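/*
 * Drop the CPR_AGING_MEASUREMENT_FILTER smallest and largest quotient
 * deltas (after sorting) and average only the remaining middle samples,
 * which filters out measurement outliers.
 */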
  1045. if (filtered_count > 0) {
  1046. sort(quot_delta_results, aging_measurement_count,
  1047. sizeof(*quot_delta_results), cmp_int, NULL);
  1048. quot_delta_scaled_sum = 0;
  1049. for (i = 0; i < filtered_count; i++)
  1050. quot_delta_scaled_sum
  1051. += quot_delta_results[i
  1052. + CPR_AGING_MEASUREMENT_FILTER];
  1053. aging_sensor_info->current_quot_diff
  1054. = quot_delta_scaled_sum / filtered_count;
  1055. cpr_debug(cpr_vreg,
  1056. "Age sensor[%d]: average aging quotient delta = %d (count = %d)\n",
  1057. aging_sensor_info->sensor_id,
  1058. aging_sensor_info->current_quot_diff, filtered_count);
  1059. } else {
  1060. cpr_err(cpr_vreg, "%d aging measurements completed after %d iterations\n",
  1061. aging_measurement_count,
  1062. CPR_AGING_MEASUREMENT_ITERATIONS);
  1063. rc = -EBUSY;
  1064. }
  1065. _exit:
  1066. /* Clear age page bit */
  1067. cpr_write(cpr_vreg, REG_RBCPR_HTOL_AGE, 0x0);
  1068. /* Disable the CPR controller after aging procedure */
  1069. cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0x0);
  1070. /* Clear the sensor bypass */
  1071. if (cpr_vreg->aging_info->aging_sensor_bypass)
  1072. cpr_write(cpr_vreg, REG_RBCPR_SENSOR_BYPASS0, 0x0);
  1073. /* Unmask all sensors */
  1074. cpr_write(cpr_vreg, REG_RBCPR_SENSOR_MASK0, 0x0);
  1075. /* Clear gcnt0/1 registers */
  1076. cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(0), 0x0);
  1077. cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(1), 0x0);
  1078. /* Program the delay count for the timer */
  1079. val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
  1080. cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
  1081. kfree(quot_delta_results);
  1082. return rc;
  1083. }
  1084. static void cpr_de_aging_adjustment(void *data)
  1085. {
  1086. struct cpr_regulator *cpr_vreg = (struct cpr_regulator *)data;
  1087. struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
  1088. struct cpr_aging_sensor_info *aging_sensor_info;
  1089. int i, num_aging_sensors, retries, rc = 0;
  1090. int max_quot_diff = 0, ro_sel = 0;
  1091. u32 voltage_adjust, aging_voltage_adjust = 0;
  1092. aging_sensor_info = aging_info->sensor_info;
  1093. num_aging_sensors = aging_info->num_aging_sensors;
  1094. for (i = 0; i < num_aging_sensors; i++, aging_sensor_info++) {
  1095. retries = 2;
  1096. while (retries--) {
  1097. rc = cpr_get_aging_quot_delta(cpr_vreg,
  1098. aging_sensor_info);
  1099. if (!rc)
  1100. break;
  1101. }
  1102. if (rc && retries < 0) {
  1103. cpr_err(cpr_vreg, "error in age calibration: rc = %d\n",
  1104. rc);
  1105. aging_info->cpr_aging_error = true;
  1106. return;
  1107. }
  1108. max_quot_diff = max(max_quot_diff,
  1109. (aging_sensor_info->current_quot_diff -
  1110. aging_sensor_info->initial_quot_diff));
  1111. }
  1112. cpr_debug(cpr_vreg, "Max aging quot delta = %d\n",
  1113. max_quot_diff);
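/*
 * Given the arithmetic here and in the loop below, aging_ro_kv acts as the
 * aging ring oscillator's sensitivity in quotient counts per volt, so
 * dividing the worst-case quotient delta by it (with the 1000000 factor)
 * yields the required adjustment in uV.
 */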
  1114. aging_voltage_adjust = DIV_ROUND_UP(max_quot_diff * 1000000,
  1115. aging_info->aging_ro_kv);
  1116. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  1117. /* Remove initial max aging adjustment */
  1118. ro_sel = cpr_vreg->cpr_fuse_ro_sel[i];
  1119. cpr_vreg->cpr_fuse_target_quot[i] -=
  1120. (aging_info->cpr_ro_kv[ro_sel]
  1121. * aging_info->max_aging_margin) / 1000000;
  1122. aging_info->voltage_adjust[i] = 0;
  1123. if (aging_voltage_adjust > 0) {
  1124. /* Add required aging adjustment */
  1125. voltage_adjust = (aging_voltage_adjust
  1126. * aging_info->aging_derate[i]) / 1000;
  1127. voltage_adjust = min(voltage_adjust,
  1128. aging_info->max_aging_margin);
  1129. cpr_vreg->cpr_fuse_target_quot[i] +=
  1130. (aging_info->cpr_ro_kv[ro_sel]
  1131. * voltage_adjust) / 1000000;
  1132. aging_info->voltage_adjust[i] = voltage_adjust;
  1133. }
  1134. }
  1135. }
  1136. static int cpr_regulator_is_enabled(struct regulator_dev *rdev)
  1137. {
  1138. struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
  1139. return cpr_vreg->vreg_enabled;
  1140. }
  1141. static int cpr_regulator_enable(struct regulator_dev *rdev)
  1142. {
  1143. struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
  1144. int rc = 0;
  1145. /* Enable dependency power before vdd_apc */
  1146. if (cpr_vreg->vdd_mx) {
  1147. rc = regulator_enable(cpr_vreg->vdd_mx);
  1148. if (rc) {
  1149. cpr_err(cpr_vreg, "regulator_enable: vdd_mx: rc=%d\n",
  1150. rc);
  1151. return rc;
  1152. }
  1153. }
  1154. rc = regulator_enable(cpr_vreg->vdd_apc);
  1155. if (rc) {
  1156. cpr_err(cpr_vreg, "regulator_enable: vdd_apc: rc=%d\n", rc);
  1157. return rc;
  1158. }
  1159. mutex_lock(&cpr_vreg->cpr_mutex);
  1160. cpr_vreg->vreg_enabled = true;
  1161. if (cpr_is_allowed(cpr_vreg) && cpr_vreg->corner) {
  1162. cpr_irq_clr(cpr_vreg);
  1163. cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
  1164. cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
  1165. }
  1166. mutex_unlock(&cpr_vreg->cpr_mutex);
  1167. return rc;
  1168. }
  1169. static int cpr_regulator_disable(struct regulator_dev *rdev)
  1170. {
  1171. struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
  1172. int rc;
  1173. rc = regulator_disable(cpr_vreg->vdd_apc);
  1174. if (!rc) {
  1175. if (cpr_vreg->vdd_mx)
  1176. rc = regulator_disable(cpr_vreg->vdd_mx);
  1177. if (rc) {
  1178. cpr_err(cpr_vreg, "regulator_disable: vdd_mx: rc=%d\n",
  1179. rc);
  1180. return rc;
  1181. }
  1182. mutex_lock(&cpr_vreg->cpr_mutex);
  1183. cpr_vreg->vreg_enabled = false;
  1184. if (cpr_is_allowed(cpr_vreg))
  1185. cpr_ctl_disable(cpr_vreg);
  1186. mutex_unlock(&cpr_vreg->cpr_mutex);
  1187. } else {
  1188. cpr_err(cpr_vreg, "regulator_disable: vdd_apc: rc=%d\n", rc);
  1189. }
  1190. return rc;
  1191. }
  1192. static int cpr_calculate_de_aging_margin(struct cpr_regulator *cpr_vreg)
  1193. {
  1194. struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
  1195. enum voltage_change_dir change_dir = NO_CHANGE;
  1196. u32 save_ctl, save_irq;
  1197. cpumask_t tmp_mask;
  1198. int rc = 0, i;
  1199. save_ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
  1200. save_irq = cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
  1201. /* Disable interrupt and CPR */
  1202. cpr_irq_set(cpr_vreg, 0);
  1203. cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
  1204. if (aging_info->aging_corner > cpr_vreg->corner)
  1205. change_dir = UP;
  1206. else if (aging_info->aging_corner < cpr_vreg->corner)
  1207. change_dir = DOWN;
  1208. /* set selected reference voltage for de-aging */
  1209. rc = cpr_scale_voltage(cpr_vreg,
  1210. aging_info->aging_corner,
  1211. aging_info->aging_ref_voltage,
  1212. change_dir);
  1213. if (rc) {
  1214. cpr_err(cpr_vreg, "Unable to set aging reference voltage, rc = %d\n",
  1215. rc);
  1216. return rc;
  1217. }
  1218. /* Force PWM mode */
  1219. rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_NORMAL);
  1220. if (rc) {
  1221. cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n",
  1222. REGULATOR_MODE_NORMAL, rc);
  1223. return rc;
  1224. }
  1225. get_online_cpus();
  1226. cpumask_and(&tmp_mask, &cpr_vreg->cpu_mask, cpu_online_mask);
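/*
 * Run the de-aging measurement on one of the CPUs in cpr_vreg->cpu_mask
 * (if any is online), presumably so the aging sensors are read from a core
 * supplied by this regulator.  If none of those CPUs is online, the
 * adjustment is skipped for now and cpr_aging_done remains false so it can
 * be attempted again later.
 */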
  1227. if (!cpumask_empty(&tmp_mask)) {
  1228. smp_call_function_any(&tmp_mask,
  1229. cpr_de_aging_adjustment,
  1230. cpr_vreg, true);
  1231. aging_info->cpr_aging_done = true;
  1232. if (!aging_info->cpr_aging_error)
  1233. for (i = CPR_FUSE_CORNER_MIN;
  1234. i <= cpr_vreg->num_fuse_corners; i++)
  1235. cpr_info(cpr_vreg, "Corner[%d]: age adjusted target quot = %d\n",
  1236. i, cpr_vreg->cpr_fuse_target_quot[i]);
  1237. }
  1238. put_online_cpus();
  1239. /* Set to initial mode */
  1240. rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_IDLE);
  1241. if (rc) {
  1242. cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n",
  1243. REGULATOR_MODE_IDLE, rc);
  1244. return rc;
  1245. }
  1246. /* Clear interrupts */
  1247. cpr_irq_clr(cpr_vreg);
  1248. /* Restore register values */
  1249. cpr_irq_set(cpr_vreg, save_irq);
  1250. cpr_write(cpr_vreg, REG_RBCPR_CTL, save_ctl);
  1251. return rc;
  1252. }
  1253. /* Note that cpr_vreg->cpr_mutex must be held by the caller. */
  1254. static int cpr_regulator_set_voltage(struct regulator_dev *rdev,
  1255. int corner, bool reset_quot)
  1256. {
  1257. struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
  1258. struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
  1259. int rc;
  1260. int new_volt;
  1261. enum voltage_change_dir change_dir = NO_CHANGE;
  1262. int fuse_corner = cpr_vreg->corner_map[corner];
  1263. if (cpr_is_allowed(cpr_vreg)) {
  1264. cpr_ctl_disable(cpr_vreg);
  1265. new_volt = cpr_vreg->last_volt[corner];
  1266. } else {
  1267. new_volt = cpr_vreg->open_loop_volt[corner];
  1268. }
  1269. cpr_debug(cpr_vreg, "[corner:%d, fuse_corner:%d] = %d uV\n",
  1270. corner, fuse_corner, new_volt);
  1271. if (corner > cpr_vreg->corner)
  1272. change_dir = UP;
  1273. else if (corner < cpr_vreg->corner)
  1274. change_dir = DOWN;
  1275. /* Read age sensor data and apply de-aging adjustments */
  1276. if (cpr_vreg->vreg_enabled && aging_info && !aging_info->cpr_aging_done
  1277. && (corner <= aging_info->aging_corner)) {
  1278. rc = cpr_calculate_de_aging_margin(cpr_vreg);
  1279. if (rc) {
  1280. cpr_err(cpr_vreg, "failed in de-aging calibration: rc=%d\n",
  1281. rc);
  1282. } else {
  1283. change_dir = NO_CHANGE;
  1284. if (corner > aging_info->aging_corner)
  1285. change_dir = UP;
  1286. else if (corner < aging_info->aging_corner)
  1287. change_dir = DOWN;
  1288. }
  1289. reset_quot = true;
  1290. }
  1291. rc = cpr_scale_voltage(cpr_vreg, corner, new_volt, change_dir);
  1292. if (rc)
  1293. return rc;
  1294. if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled) {
  1295. cpr_irq_clr(cpr_vreg);
  1296. if (reset_quot)
  1297. cpr_corner_restore(cpr_vreg, corner);
  1298. else
  1299. cpr_corner_switch(cpr_vreg, corner);
  1300. cpr_ctl_enable(cpr_vreg, corner);
  1301. }
  1302. cpr_vreg->corner = corner;
  1303. return rc;
  1304. }
  1305. static int cpr_regulator_set_voltage_op(struct regulator_dev *rdev,
  1306. int corner, int corner_max, unsigned *selector)
  1307. {
  1308. struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
  1309. int rc;
  1310. mutex_lock(&cpr_vreg->cpr_mutex);
  1311. rc = cpr_regulator_set_voltage(rdev, corner, false);
  1312. mutex_unlock(&cpr_vreg->cpr_mutex);
  1313. return rc;
  1314. }
  1315. static int cpr_regulator_get_voltage(struct regulator_dev *rdev)
  1316. {
  1317. struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
  1318. return cpr_vreg->corner;
  1319. }
  1320. /**
  1321. * cpr_regulator_list_corner_voltage() - return the ceiling voltage mapped to
  1322. * the specified voltage corner
  1323. * @rdev: Regulator device pointer for the cpr-regulator
  1324. * @corner: Voltage corner
  1325. *
  1326. * This function is passed as a callback function into the regulator ops that
  1327. * are registered for each cpr-regulator device.
  1328. *
  1329. * Return: voltage value in microvolts or -EINVAL if the corner is out of range
  1330. */
  1331. static int cpr_regulator_list_corner_voltage(struct regulator_dev *rdev,
  1332. int corner)
  1333. {
  1334. struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
  1335. if (corner >= CPR_CORNER_MIN && corner <= cpr_vreg->num_corners)
  1336. return cpr_vreg->ceiling_volt[corner];
  1337. else
  1338. return -EINVAL;
  1339. }
  1340. static struct regulator_ops cpr_corner_ops = {
  1341. .enable = cpr_regulator_enable,
  1342. .disable = cpr_regulator_disable,
  1343. .is_enabled = cpr_regulator_is_enabled,
  1344. .set_voltage = cpr_regulator_set_voltage_op,
  1345. .get_voltage = cpr_regulator_get_voltage,
  1346. .list_corner_voltage = cpr_regulator_list_corner_voltage,
  1347. };
  1348. #ifdef CONFIG_PM
  1349. static int cpr_suspend(struct cpr_regulator *cpr_vreg)
  1350. {
  1351. cpr_debug(cpr_vreg, "suspend\n");
  1352. cpr_ctl_disable(cpr_vreg);
  1353. cpr_irq_clr(cpr_vreg);
  1354. return 0;
  1355. }
  1356. static int cpr_resume(struct cpr_regulator *cpr_vreg)
  1357. {
  1358. cpr_debug(cpr_vreg, "resume\n");
  1359. cpr_irq_clr(cpr_vreg);
  1360. cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
  1361. return 0;
  1362. }
  1363. static int cpr_regulator_suspend(struct platform_device *pdev,
  1364. pm_message_t state)
  1365. {
  1366. struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
  1367. int rc = 0;
  1368. mutex_lock(&cpr_vreg->cpr_mutex);
  1369. if (cpr_is_allowed(cpr_vreg))
  1370. rc = cpr_suspend(cpr_vreg);
  1371. cpr_vreg->is_cpr_suspended = true;
  1372. mutex_unlock(&cpr_vreg->cpr_mutex);
  1373. return rc;
  1374. }
  1375. static int cpr_regulator_resume(struct platform_device *pdev)
  1376. {
  1377. struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
  1378. int rc = 0;
  1379. mutex_lock(&cpr_vreg->cpr_mutex);
  1380. cpr_vreg->is_cpr_suspended = false;
  1381. if (cpr_is_allowed(cpr_vreg))
  1382. rc = cpr_resume(cpr_vreg);
  1383. mutex_unlock(&cpr_vreg->cpr_mutex);
  1384. return rc;
  1385. }
  1386. #else
  1387. #define cpr_regulator_suspend NULL
  1388. #define cpr_regulator_resume NULL
  1389. #endif
  1390. static int cpr_config(struct cpr_regulator *cpr_vreg, struct device *dev)
  1391. {
  1392. int i;
  1393. u32 val, gcnt, reg;
  1394. void __iomem *rbcpr_clk;
  1395. int size;
  1396. if (cpr_vreg->rbcpr_clk_addr) {
  1397. /* Use 19.2 MHz clock for CPR. */
  1398. rbcpr_clk = ioremap(cpr_vreg->rbcpr_clk_addr, 4);
  1399. if (!rbcpr_clk) {
  1400. cpr_err(cpr_vreg, "Unable to map rbcpr_clk\n");
  1401. return -EINVAL;
  1402. }
  1403. reg = readl_relaxed(rbcpr_clk);
  1404. reg &= ~RBCPR_CLK_SEL_MASK;
  1405. reg |= RBCPR_CLK_SEL_19P2_MHZ & RBCPR_CLK_SEL_MASK;
  1406. writel_relaxed(reg, rbcpr_clk);
  1407. iounmap(rbcpr_clk);
  1408. }
  1409. /* Disable interrupt and CPR */
  1410. cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), 0);
  1411. cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
  1412. /* Program the default HW Ceiling, Floor and vlevel */
  1413. val = ((RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
  1414. << RBIF_LIMIT_CEILING_SHIFT)
  1415. | (RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK);
  1416. cpr_write(cpr_vreg, REG_RBIF_LIMIT, val);
  1417. cpr_write(cpr_vreg, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
  1418. /* Clear the target quotient value and gate count of all ROs */
  1419. for (i = 0; i < CPR_NUM_RING_OSC; i++)
  1420. cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
  1421. /* Init and save gcnt */
  1422. gcnt = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
  1423. gcnt = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
  1424. RBCPR_GCNT_TARGET_GCNT_SHIFT;
  1425. cpr_vreg->gcnt = gcnt;
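/*
 * gcnt is the measurement gate count expressed in reference-clock cycles:
 * e.g. a hypothetical ref_clk_khz = 19200 (19.2 MHz) with gcnt_time_us = 1
 * gives 19200 * 1 / 1000 = 19 cycles (illustrative values only).  It is
 * stored pre-shifted into the GCNT field position for later use.
 */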
  1426. /* Program the delay count for the timer */
  1427. val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
  1428. cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
  1429. cpr_info(cpr_vreg, "Timer count: 0x%0x (for %d us)\n", val,
  1430. cpr_vreg->timer_delay_us);
  1431. /* Program Consecutive Up & Down */
  1432. val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
  1433. << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
  1434. (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK) |
  1435. ((cpr_vreg->clamp_timer_interval & RBIF_TIMER_ADJ_CLAMP_INT_MASK)
  1436. << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT);
  1437. cpr_write(cpr_vreg, REG_RBIF_TIMER_ADJUST, val);
  1438. /* Program the control register */
  1439. cpr_vreg->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
  1440. cpr_vreg->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
  1441. val = (cpr_vreg->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT)
  1442. | (cpr_vreg->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT);
  1443. val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
  1444. val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
  1445. cpr_write(cpr_vreg, REG_RBCPR_CTL, val);
  1446. cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
  1447. val = cpr_read(cpr_vreg, REG_RBCPR_VERSION);
  1448. if (val <= RBCPR_VER_2)
  1449. cpr_vreg->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
  1450. size = cpr_vreg->num_corners + 1;
  1451. cpr_vreg->save_ctl = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
  1452. cpr_vreg->save_irq = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
  1453. if (!cpr_vreg->save_ctl || !cpr_vreg->save_irq)
  1454. return -ENOMEM;
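/*
 * Seed the per-corner save_ctl/save_irq entries with the register values
 * programmed above, so that cpr_corner_restore() has sensible defaults
 * before a closed-loop measurement has run for each corner.
 */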
  1455. for (i = 1; i < size; i++)
  1456. cpr_corner_save(cpr_vreg, i);
  1457. return 0;
  1458. }
  1459. static int cpr_fuse_is_setting_expected(struct cpr_regulator *cpr_vreg,
  1460. u32 sel_array[5])
  1461. {
  1462. u64 fuse_bits;
  1463. u32 ret;
  1464. fuse_bits = cpr_read_efuse_row(cpr_vreg, sel_array[0], sel_array[4]);
  1465. ret = (fuse_bits >> sel_array[1]) & ((1 << sel_array[2]) - 1);
  1466. if (ret == sel_array[3])
  1467. ret = 1;
  1468. else
  1469. ret = 0;
  1470. cpr_info(cpr_vreg, "[row:%d] = 0x%llx @%d:%d == %d ?: %s\n",
  1471. sel_array[0], fuse_bits,
  1472. sel_array[1], sel_array[2],
  1473. sel_array[3],
  1474. (ret == 1) ? "yes" : "no");
  1475. return ret;
  1476. }
  1477. static int cpr_voltage_uplift_wa_inc_volt(struct cpr_regulator *cpr_vreg,
  1478. struct device_node *of_node)
  1479. {
  1480. u32 uplift_voltage;
  1481. u32 uplift_max_volt = 0;
  1482. int highest_fuse_corner = cpr_vreg->num_fuse_corners;
  1483. int rc;
  1484. rc = of_property_read_u32(of_node,
  1485. "qcom,cpr-uplift-voltage", &uplift_voltage);
  1486. if (rc < 0) {
  1487. cpr_err(cpr_vreg, "cpr-uplift-voltage is missing, rc = %d", rc);
  1488. return rc;
  1489. }
  1490. rc = of_property_read_u32(of_node,
  1491. "qcom,cpr-uplift-max-volt", &uplift_max_volt);
  1492. if (rc < 0) {
  1493. cpr_err(cpr_vreg, "cpr-uplift-max-volt is missing, rc = %d",
  1494. rc);
  1495. return rc;
  1496. }
  1497. cpr_vreg->pvs_corner_v[highest_fuse_corner] += uplift_voltage;
  1498. if (cpr_vreg->pvs_corner_v[highest_fuse_corner] > uplift_max_volt)
  1499. cpr_vreg->pvs_corner_v[highest_fuse_corner] = uplift_max_volt;
  1500. return rc;
  1501. }
  1502. static int cpr_adjust_init_voltages(struct device_node *of_node,
  1503. struct cpr_regulator *cpr_vreg)
  1504. {
  1505. int tuple_count, tuple_match, i;
  1506. u32 index;
  1507. u32 volt_adjust = 0;
  1508. int len = 0;
  1509. int rc = 0;
  1510. if (!of_find_property(of_node, "qcom,cpr-init-voltage-adjustment",
  1511. &len)) {
  1512. /* No initial voltage adjustment needed. */
  1513. return 0;
  1514. }
  1515. if (cpr_vreg->cpr_fuse_map_count) {
  1516. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
  1517. /*
  1518. * No matching index to use for initial voltage
  1519. * adjustment.
  1520. */
  1521. return 0;
  1522. }
  1523. tuple_count = cpr_vreg->cpr_fuse_map_count;
  1524. tuple_match = cpr_vreg->cpr_fuse_map_match;
  1525. } else {
  1526. tuple_count = 1;
  1527. tuple_match = 0;
  1528. }
  1529. if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
  1530. cpr_err(cpr_vreg, "qcom,cpr-init-voltage-adjustment length=%d is invalid\n",
  1531. len);
  1532. return -EINVAL;
  1533. }
  1534. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  1535. index = tuple_match * cpr_vreg->num_fuse_corners
  1536. + i - CPR_FUSE_CORNER_MIN;
  1537. rc = of_property_read_u32_index(of_node,
  1538. "qcom,cpr-init-voltage-adjustment", index,
  1539. &volt_adjust);
  1540. if (rc) {
  1541. cpr_err(cpr_vreg, "could not read qcom,cpr-init-voltage-adjustment index %u, rc=%d\n",
  1542. index, rc);
  1543. return rc;
  1544. }
  1545. if (volt_adjust) {
  1546. cpr_vreg->pvs_corner_v[i] += volt_adjust;
  1547. cpr_info(cpr_vreg, "adjusted initial voltage[%d]: %d -> %d uV\n",
  1548. i, cpr_vreg->pvs_corner_v[i] - volt_adjust,
  1549. cpr_vreg->pvs_corner_v[i]);
  1550. }
  1551. }
  1552. return rc;
  1553. }
1554. /*
1555. * Property qcom,cpr-fuse-init-voltage specifies the fuse position of the
1556. * initial voltage for each fuse corner. The MSB of the fuse value is a sign
1557. * bit, and the remaining bits define the steps of the offset. Each step has
1558. * units of microvolts defined in the qcom,cpr-init-voltage-step property.
1559. * The initial voltages are computed relative to qcom,cpr-init-voltage-ref:
1560. * pvs_corner_v[corner] = ref_volt[corner] + (sign * steps * step_size_uv)
1561. */
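/*
 * Worked example (hypothetical values): for a 6-bit fuse field, a raw value
 * of 0b100011 has the sign bit (bit 5) set and a step count of 3, so with
 * step_size_uv = 10000 the fused offset is -30000 uV relative to the
 * corner's reference voltage.
 */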
  1562. static int cpr_pvs_per_corner_init(struct device_node *of_node,
  1563. struct cpr_regulator *cpr_vreg)
  1564. {
  1565. u64 efuse_bits;
  1566. int i, size, sign, steps, step_size_uv, rc;
  1567. u32 *fuse_sel, *tmp, *ref_uv;
  1568. struct property *prop;
  1569. char *init_volt_str;
  1570. init_volt_str = cpr_vreg->cpr_fuse_redundant
  1571. ? "qcom,cpr-fuse-redun-init-voltage"
  1572. : "qcom,cpr-fuse-init-voltage";
  1573. prop = of_find_property(of_node, init_volt_str, NULL);
  1574. if (!prop) {
  1575. cpr_err(cpr_vreg, "%s is missing\n", init_volt_str);
  1576. return -EINVAL;
  1577. }
  1578. size = prop->length / sizeof(u32);
  1579. if (size != cpr_vreg->num_fuse_corners * 4) {
  1580. cpr_err(cpr_vreg,
  1581. "fuse position for init voltages is invalid\n");
  1582. return -EINVAL;
  1583. }
  1584. fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
  1585. if (!fuse_sel) {
  1586. cpr_err(cpr_vreg, "memory alloc failed.\n");
  1587. return -ENOMEM;
  1588. }
  1589. rc = of_property_read_u32_array(of_node, init_volt_str,
  1590. fuse_sel, size);
  1591. if (rc < 0) {
  1592. cpr_err(cpr_vreg,
  1593. "read cpr-fuse-init-voltage failed, rc = %d\n", rc);
  1594. kfree(fuse_sel);
  1595. return rc;
  1596. }
  1597. rc = of_property_read_u32(of_node, "qcom,cpr-init-voltage-step",
  1598. &step_size_uv);
  1599. if (rc < 0) {
  1600. cpr_err(cpr_vreg,
  1601. "read cpr-init-voltage-step failed, rc = %d\n", rc);
  1602. kfree(fuse_sel);
  1603. return rc;
  1604. }
  1605. ref_uv = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*ref_uv),
  1606. GFP_KERNEL);
  1607. if (!ref_uv) {
  1608. cpr_err(cpr_vreg,
  1609. "Could not allocate memory for reference voltages\n");
  1610. kfree(fuse_sel);
  1611. return -ENOMEM;
  1612. }
  1613. rc = of_property_read_u32_array(of_node, "qcom,cpr-init-voltage-ref",
  1614. &ref_uv[CPR_FUSE_CORNER_MIN], cpr_vreg->num_fuse_corners);
  1615. if (rc < 0) {
  1616. cpr_err(cpr_vreg,
  1617. "read qcom,cpr-init-voltage-ref failed, rc = %d\n", rc);
  1618. kfree(fuse_sel);
  1619. kfree(ref_uv);
  1620. return rc;
  1621. }
  1622. tmp = fuse_sel;
  1623. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  1624. efuse_bits = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
  1625. fuse_sel[1], fuse_sel[2], fuse_sel[3]);
  1626. sign = (efuse_bits & (1 << (fuse_sel[2] - 1))) ? -1 : 1;
  1627. steps = efuse_bits & ((1 << (fuse_sel[2] - 1)) - 1);
  1628. cpr_vreg->pvs_corner_v[i] =
  1629. ref_uv[i] + sign * steps * step_size_uv;
  1630. cpr_vreg->pvs_corner_v[i] = DIV_ROUND_UP(
  1631. cpr_vreg->pvs_corner_v[i],
  1632. cpr_vreg->step_volt) *
  1633. cpr_vreg->step_volt;
  1634. cpr_debug(cpr_vreg, "corner %d: sign = %d, steps = %d, volt = %d uV\n",
  1635. i, sign, steps, cpr_vreg->pvs_corner_v[i]);
  1636. fuse_sel += 4;
  1637. }
  1638. rc = cpr_adjust_init_voltages(of_node, cpr_vreg);
  1639. if (rc)
  1640. goto done;
  1641. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  1642. if (cpr_vreg->pvs_corner_v[i]
  1643. > cpr_vreg->fuse_ceiling_volt[i]) {
  1644. cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d above ceiling %d\n",
  1645. i, cpr_vreg->pvs_corner_v[i],
  1646. cpr_vreg->fuse_ceiling_volt[i]);
  1647. cpr_vreg->pvs_corner_v[i]
  1648. = cpr_vreg->fuse_ceiling_volt[i];
  1649. } else if (cpr_vreg->pvs_corner_v[i] <
  1650. cpr_vreg->fuse_floor_volt[i]) {
  1651. cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d below floor %d\n",
  1652. i, cpr_vreg->pvs_corner_v[i],
  1653. cpr_vreg->fuse_floor_volt[i]);
  1654. cpr_vreg->pvs_corner_v[i]
  1655. = cpr_vreg->fuse_floor_volt[i];
  1656. }
  1657. }
  1658. done:
  1659. kfree(tmp);
  1660. kfree(ref_uv);
  1661. return rc;
  1662. }
1663. /*
1664. * A single PVS bin is stored in a fuse whose position is defined either
1665. * in the qcom,pvs-fuse-redun property or in the qcom,pvs-fuse property.
1666. * The fuse value defined in the qcom,pvs-fuse-redun-sel property is used
1667. * to pick between the primary and redundant PVS fuse positions.
1668. * After the PVS bin value is read out successfully, it is used as the row
1669. * index to get initial voltages for each fuse corner from the voltage table
1670. * defined in the qcom,pvs-voltage-table property.
1671. */
  1672. static int cpr_pvs_single_bin_init(struct device_node *of_node,
  1673. struct cpr_regulator *cpr_vreg)
  1674. {
  1675. u64 efuse_bits;
  1676. u32 pvs_fuse[4], pvs_fuse_redun_sel[5];
  1677. int rc, i, stripe_size;
  1678. bool redundant;
  1679. size_t pvs_bins;
  1680. u32 *tmp;
  1681. rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun-sel",
  1682. pvs_fuse_redun_sel, 5);
  1683. if (rc < 0) {
  1684. cpr_err(cpr_vreg, "pvs-fuse-redun-sel missing: rc=%d\n", rc);
  1685. return rc;
  1686. }
  1687. redundant = cpr_fuse_is_setting_expected(cpr_vreg, pvs_fuse_redun_sel);
  1688. if (redundant) {
  1689. rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun",
  1690. pvs_fuse, 4);
  1691. if (rc < 0) {
  1692. cpr_err(cpr_vreg, "pvs-fuse-redun missing: rc=%d\n",
  1693. rc);
  1694. return rc;
  1695. }
  1696. } else {
  1697. rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse",
  1698. pvs_fuse, 4);
  1699. if (rc < 0) {
  1700. cpr_err(cpr_vreg, "pvs-fuse missing: rc=%d\n", rc);
  1701. return rc;
  1702. }
  1703. }
  1704. /* Construct PVS process # from the efuse bits */
  1705. efuse_bits = cpr_read_efuse_row(cpr_vreg, pvs_fuse[0], pvs_fuse[3]);
  1706. cpr_vreg->pvs_bin = (efuse_bits >> pvs_fuse[1]) &
  1707. ((1 << pvs_fuse[2]) - 1);
  1708. pvs_bins = 1 << pvs_fuse[2];
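/*
 * pvs_fuse[] holds {fuse row, bit offset, field width in bits, <argument
 * forwarded to cpr_read_efuse_row()>}; the decoded bin therefore selects
 * one of (1 << width) rows in qcom,pvs-voltage-table, each row containing
 * one initial voltage per fuse corner.
 */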
  1709. stripe_size = cpr_vreg->num_fuse_corners;
  1710. tmp = kzalloc(sizeof(u32) * pvs_bins * stripe_size, GFP_KERNEL);
  1711. if (!tmp) {
  1712. cpr_err(cpr_vreg, "memory alloc failed\n");
  1713. return -ENOMEM;
  1714. }
  1715. rc = of_property_read_u32_array(of_node, "qcom,pvs-voltage-table",
  1716. tmp, pvs_bins * stripe_size);
  1717. if (rc < 0) {
  1718. cpr_err(cpr_vreg, "pvs-voltage-table missing: rc=%d\n", rc);
  1719. kfree(tmp);
  1720. return rc;
  1721. }
  1722. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
  1723. cpr_vreg->pvs_corner_v[i] = tmp[cpr_vreg->pvs_bin *
  1724. stripe_size + i - 1];
  1725. kfree(tmp);
  1726. rc = cpr_adjust_init_voltages(of_node, cpr_vreg);
  1727. if (rc)
  1728. return rc;
  1729. return 0;
  1730. }
1731. /*
1732. * This function reads the VDD_MX dependency parameters from the device node.
1733. * The required qcom,vdd-mx-corner-map length is either num_fuse_corners or
1734. * num_corners, depending upon the selected vdd-mx-vmin-method.
1735. */
  1736. static int cpr_parse_vdd_mx_parameters(struct platform_device *pdev,
  1737. struct cpr_regulator *cpr_vreg)
  1738. {
  1739. struct device_node *of_node = pdev->dev.of_node;
  1740. u32 corner_map_len;
  1741. int rc, len, size;
  1742. rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmax",
  1743. &cpr_vreg->vdd_mx_vmax);
  1744. if (rc < 0) {
  1745. cpr_err(cpr_vreg, "vdd-mx-vmax missing: rc=%d\n", rc);
  1746. return rc;
  1747. }
  1748. rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmin-method",
  1749. &cpr_vreg->vdd_mx_vmin_method);
  1750. if (rc < 0) {
  1751. cpr_err(cpr_vreg, "vdd-mx-vmin-method missing: rc=%d\n",
  1752. rc);
  1753. return rc;
  1754. }
  1755. if (cpr_vreg->vdd_mx_vmin_method > VDD_MX_VMIN_APC_CORNER_MAP) {
  1756. cpr_err(cpr_vreg, "Invalid vdd-mx-vmin-method(%d)\n",
  1757. cpr_vreg->vdd_mx_vmin_method);
  1758. return -EINVAL;
  1759. }
  1760. switch (cpr_vreg->vdd_mx_vmin_method) {
  1761. case VDD_MX_VMIN_APC_FUSE_CORNER_MAP:
  1762. corner_map_len = cpr_vreg->num_fuse_corners;
  1763. break;
  1764. case VDD_MX_VMIN_APC_CORNER_MAP:
  1765. corner_map_len = cpr_vreg->num_corners;
  1766. break;
  1767. default:
  1768. cpr_vreg->vdd_mx_corner_map = NULL;
  1769. return 0;
  1770. }
  1771. if (!of_find_property(of_node, "qcom,vdd-mx-corner-map", &len)) {
  1772. cpr_err(cpr_vreg, "qcom,vdd-mx-corner-map missing");
  1773. return -EINVAL;
  1774. }
  1775. size = len / sizeof(u32);
  1776. if (size != corner_map_len) {
  1777. cpr_err(cpr_vreg,
  1778. "qcom,vdd-mx-corner-map length=%d is invalid: required:%u\n",
  1779. size, corner_map_len);
  1780. return -EINVAL;
  1781. }
  1782. cpr_vreg->vdd_mx_corner_map = devm_kzalloc(&pdev->dev,
  1783. (corner_map_len + 1) * sizeof(*cpr_vreg->vdd_mx_corner_map),
  1784. GFP_KERNEL);
  1785. if (!cpr_vreg->vdd_mx_corner_map) {
  1786. cpr_err(cpr_vreg,
  1787. "Can't allocate memory for cpr_vreg->vdd_mx_corner_map\n");
  1788. return -ENOMEM;
  1789. }
  1790. rc = of_property_read_u32_array(of_node,
  1791. "qcom,vdd-mx-corner-map",
  1792. &cpr_vreg->vdd_mx_corner_map[1],
  1793. corner_map_len);
  1794. if (rc)
  1795. cpr_err(cpr_vreg,
  1796. "read qcom,vdd-mx-corner-map failed, rc = %d\n", rc);
  1797. return rc;
  1798. }
  1799. #define MAX_CHARS_PER_INT 10
  1800. /*
  1801. * The initial voltage for each fuse corner may be determined by one of two
  1802. * possible styles of fuse. If qcom,cpr-fuse-init-voltage is present, then
  1803. * the initial voltages are encoded in a fuse for each fuse corner. If it is
  1804. * not present, then the initial voltages are all determined using a single
  1805. * PVS bin fuse value.
  1806. */
  1807. static int cpr_pvs_init(struct platform_device *pdev,
  1808. struct cpr_regulator *cpr_vreg)
  1809. {
  1810. struct device_node *of_node = pdev->dev.of_node;
  1811. int highest_fuse_corner = cpr_vreg->num_fuse_corners;
  1812. int i, rc, pos;
  1813. size_t buflen;
  1814. char *buf;
  1815. rc = of_property_read_u32(of_node, "qcom,cpr-apc-volt-step",
  1816. &cpr_vreg->step_volt);
  1817. if (rc < 0) {
  1818. cpr_err(cpr_vreg, "read cpr-apc-volt-step failed, rc = %d\n",
  1819. rc);
  1820. return rc;
  1821. } else if (cpr_vreg->step_volt == 0) {
  1822. cpr_err(cpr_vreg, "apc voltage step size can't be set to 0.\n");
  1823. return -EINVAL;
  1824. }
  1825. if (of_find_property(of_node, "qcom,cpr-fuse-init-voltage", NULL)) {
  1826. rc = cpr_pvs_per_corner_init(of_node, cpr_vreg);
  1827. if (rc < 0) {
  1828. cpr_err(cpr_vreg, "get pvs per corner failed, rc = %d",
  1829. rc);
  1830. return rc;
  1831. }
  1832. } else {
  1833. rc = cpr_pvs_single_bin_init(of_node, cpr_vreg);
  1834. if (rc < 0) {
  1835. cpr_err(cpr_vreg,
  1836. "get pvs from single bin failed, rc = %d", rc);
  1837. return rc;
  1838. }
  1839. }
  1840. if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
  1841. rc = cpr_voltage_uplift_wa_inc_volt(cpr_vreg, of_node);
  1842. if (rc < 0) {
  1843. cpr_err(cpr_vreg, "pvs volt uplift wa apply failed: %d",
  1844. rc);
  1845. return rc;
  1846. }
  1847. }
  1848. /*
  1849. * Allow the highest fuse corner's PVS voltage to define the ceiling
1850. * voltage for that corner in order to support SoCs in which variable
  1851. * ceiling values are required.
  1852. */
  1853. if (cpr_vreg->pvs_corner_v[highest_fuse_corner] >
  1854. cpr_vreg->fuse_ceiling_volt[highest_fuse_corner])
  1855. cpr_vreg->fuse_ceiling_volt[highest_fuse_corner] =
  1856. cpr_vreg->pvs_corner_v[highest_fuse_corner];
  1857. /*
  1858. * Restrict all fuse corner PVS voltages based upon per corner
  1859. * ceiling and floor voltages.
  1860. */
  1861. for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
  1862. if (cpr_vreg->pvs_corner_v[i] > cpr_vreg->fuse_ceiling_volt[i])
  1863. cpr_vreg->pvs_corner_v[i]
  1864. = cpr_vreg->fuse_ceiling_volt[i];
  1865. else if (cpr_vreg->pvs_corner_v[i]
  1866. < cpr_vreg->fuse_floor_volt[i])
  1867. cpr_vreg->pvs_corner_v[i]
  1868. = cpr_vreg->fuse_floor_volt[i];
  1869. cpr_vreg->ceiling_max
  1870. = cpr_vreg->fuse_ceiling_volt[highest_fuse_corner];
  1871. /*
1872. * Log ceiling, floor, and initial voltages since they are critical for
  1873. * all CPR debugging.
  1874. */
  1875. buflen = cpr_vreg->num_fuse_corners * (MAX_CHARS_PER_INT + 2)
  1876. * sizeof(*buf);
  1877. buf = kzalloc(buflen, GFP_KERNEL);
  1878. if (buf == NULL) {
  1879. cpr_err(cpr_vreg, "Could not allocate memory for corner voltage logging\n");
  1880. return 0;
  1881. }
  1882. for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
  1883. pos += scnprintf(buf + pos, buflen - pos, "%u%s",
  1884. cpr_vreg->pvs_corner_v[i],
  1885. i < highest_fuse_corner ? " " : "");
  1886. cpr_info(cpr_vreg, "pvs voltage: [%s] uV\n", buf);
  1887. for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
  1888. pos += scnprintf(buf + pos, buflen - pos, "%d%s",
  1889. cpr_vreg->fuse_ceiling_volt[i],
  1890. i < highest_fuse_corner ? " " : "");
  1891. cpr_info(cpr_vreg, "ceiling voltage: [%s] uV\n", buf);
  1892. for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
  1893. pos += scnprintf(buf + pos, buflen - pos, "%d%s",
  1894. cpr_vreg->fuse_floor_volt[i],
  1895. i < highest_fuse_corner ? " " : "");
  1896. cpr_info(cpr_vreg, "floor voltage: [%s] uV\n", buf);
  1897. kfree(buf);
  1898. return 0;
  1899. }
  1900. #define CPR_PROP_READ_U32(cpr_vreg, of_node, cpr_property, cpr_config, rc) \
  1901. do { \
  1902. if (!rc) { \
  1903. rc = of_property_read_u32(of_node, \
  1904. "qcom," cpr_property, \
  1905. cpr_config); \
  1906. if (rc) { \
  1907. cpr_err(cpr_vreg, "Missing " #cpr_property \
  1908. ": rc = %d\n", rc); \
  1909. } \
  1910. } \
  1911. } while (0)
  1912. static int cpr_apc_init(struct platform_device *pdev,
  1913. struct cpr_regulator *cpr_vreg)
  1914. {
  1915. struct device_node *of_node = pdev->dev.of_node;
  1916. int i, rc = 0;
  1917. for (i = 0; i < ARRAY_SIZE(vdd_apc_name); i++) {
  1918. cpr_vreg->vdd_apc = devm_regulator_get_optional(&pdev->dev,
  1919. vdd_apc_name[i]);
  1920. rc = PTR_RET(cpr_vreg->vdd_apc);
  1921. if (!IS_ERR_OR_NULL(cpr_vreg->vdd_apc))
  1922. break;
  1923. }
  1924. if (rc) {
  1925. if (rc != -EPROBE_DEFER)
  1926. cpr_err(cpr_vreg, "devm_regulator_get: rc=%d\n", rc);
  1927. return rc;
  1928. }
  1929. /* Check dependencies */
  1930. if (of_find_property(of_node, "vdd-mx-supply", NULL)) {
  1931. cpr_vreg->vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
  1932. if (IS_ERR_OR_NULL(cpr_vreg->vdd_mx)) {
  1933. rc = PTR_RET(cpr_vreg->vdd_mx);
  1934. if (rc != -EPROBE_DEFER)
  1935. cpr_err(cpr_vreg,
  1936. "devm_regulator_get: vdd-mx: rc=%d\n",
  1937. rc);
  1938. return rc;
  1939. }
  1940. }
  1941. return 0;
  1942. }
  1943. static void cpr_apc_exit(struct cpr_regulator *cpr_vreg)
  1944. {
  1945. if (cpr_vreg->vreg_enabled) {
  1946. regulator_disable(cpr_vreg->vdd_apc);
  1947. if (cpr_vreg->vdd_mx)
  1948. regulator_disable(cpr_vreg->vdd_mx);
  1949. }
  1950. }
  1951. static int cpr_voltage_uplift_wa_inc_quot(struct cpr_regulator *cpr_vreg,
  1952. struct device_node *of_node)
  1953. {
  1954. u32 delta_quot[3];
  1955. int rc, i;
  1956. rc = of_property_read_u32_array(of_node,
  1957. "qcom,cpr-uplift-quotient", delta_quot, 3);
  1958. if (rc < 0) {
  1959. cpr_err(cpr_vreg, "cpr-uplift-quotient is missing: %d", rc);
  1960. return rc;
  1961. }
  1962. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
  1963. cpr_vreg->cpr_fuse_target_quot[i] += delta_quot[i-1];
  1964. return rc;
  1965. }
  1966. static void cpr_parse_pvs_version_fuse(struct cpr_regulator *cpr_vreg,
  1967. struct device_node *of_node)
  1968. {
  1969. int rc;
  1970. u64 fuse_bits;
  1971. u32 fuse_sel[4];
  1972. rc = of_property_read_u32_array(of_node,
  1973. "qcom,pvs-version-fuse-sel", fuse_sel, 4);
  1974. if (!rc) {
  1975. fuse_bits = cpr_read_efuse_row(cpr_vreg,
  1976. fuse_sel[0], fuse_sel[3]);
  1977. cpr_vreg->pvs_version = (fuse_bits >> fuse_sel[1]) &
  1978. ((1 << fuse_sel[2]) - 1);
  1979. cpr_info(cpr_vreg, "[row: %d]: 0x%llx, pvs_version = %d\n",
  1980. fuse_sel[0], fuse_bits, cpr_vreg->pvs_version);
  1981. } else {
  1982. cpr_vreg->pvs_version = 0;
  1983. }
  1984. }
  1985. /**
  1986. * cpr_get_open_loop_voltage() - fill the open_loop_volt array with linearly
  1987. * interpolated open-loop CPR voltage values.
  1988. * @cpr_vreg: Handle to the cpr-regulator device
  1989. * @dev: Device pointer for the cpr-regulator device
  1990. * @corner_max: Array of length (cpr_vreg->num_fuse_corners + 1) which maps from
  1991. * fuse corners to the highest virtual corner corresponding to a
  1992. * given fuse corner
  1993. * @freq_map: Array of length (cpr_vreg->num_corners + 1) which maps from
  1994. * virtual corners to frequencies in Hz.
  1995. * @maps_valid: Boolean which indicates if the values in corner_max and freq_map
  1996. * are valid. If they are not valid, then the open_loop_volt
  1997. * values are not interpolated.
  1998. */
  1999. static int cpr_get_open_loop_voltage(struct cpr_regulator *cpr_vreg,
  2000. struct device *dev, const u32 *corner_max, const u32 *freq_map,
  2001. bool maps_valid)
  2002. {
  2003. int rc = 0;
  2004. int i, j;
  2005. u64 volt_high, volt_low, freq_high, freq_low, freq, temp, temp_limit;
  2006. u32 *max_factor = NULL;
  2007. cpr_vreg->open_loop_volt = devm_kzalloc(dev,
  2008. sizeof(int) * (cpr_vreg->num_corners + 1), GFP_KERNEL);
  2009. if (!cpr_vreg->open_loop_volt) {
  2010. cpr_err(cpr_vreg,
  2011. "Can't allocate memory for cpr_vreg->open_loop_volt\n");
  2012. return -ENOMEM;
  2013. }
  2014. /*
  2015. * Set open loop voltage to be equal to per-fuse-corner initial voltage
  2016. * by default. This ensures that the open loop voltage is valid for
  2017. * all virtual corners even if some virtual corner to frequency mappings
  2018. * are missing. It also ensures that the voltage is valid for the
  2019. * higher corners not utilized by a given speed-bin.
  2020. */
  2021. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
  2022. cpr_vreg->open_loop_volt[i]
  2023. = cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]];
  2024. if (!maps_valid || !corner_max || !freq_map
  2025. || !of_find_property(dev->of_node,
  2026. "qcom,cpr-voltage-scaling-factor-max", NULL)) {
  2027. /* Not using interpolation */
  2028. return 0;
  2029. }
  2030. max_factor
  2031. = kzalloc(sizeof(*max_factor) * (cpr_vreg->num_fuse_corners + 1),
  2032. GFP_KERNEL);
  2033. if (!max_factor) {
  2034. cpr_err(cpr_vreg, "Could not allocate memory for max_factor array\n");
  2035. return -ENOMEM;
  2036. }
  2037. rc = of_property_read_u32_array(dev->of_node,
  2038. "qcom,cpr-voltage-scaling-factor-max",
  2039. &max_factor[CPR_FUSE_CORNER_MIN],
  2040. cpr_vreg->num_fuse_corners);
  2041. if (rc) {
  2042. cpr_debug(cpr_vreg, "failed to read qcom,cpr-voltage-scaling-factor-max; initial voltage interpolation not possible\n");
  2043. kfree(max_factor);
  2044. return 0;
  2045. }
  2046. for (j = CPR_FUSE_CORNER_MIN + 1; j <= cpr_vreg->num_fuse_corners;
  2047. j++) {
  2048. freq_high = freq_map[corner_max[j]];
  2049. freq_low = freq_map[corner_max[j - 1]];
  2050. volt_high = cpr_vreg->pvs_corner_v[j];
  2051. volt_low = cpr_vreg->pvs_corner_v[j - 1];
  2052. if (freq_high <= freq_low || volt_high <= volt_low)
  2053. continue;
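/*
 * For every virtual corner strictly between two adjacent fuse-corner
 * maxima, linearly interpolate between the two fuse corners' initial
 * voltages based on frequency:
 *
 *   volt(i) = volt_high
 *             - (freq_high - freq(i)) * (volt_high - volt_low)
 *                                     / (freq_high - freq_low)
 *
 * The reduction is additionally capped at
 * (freq_high - freq(i)) * max_factor[j] / 1000000 uV, and the result is
 * rounded up to a multiple of step_volt.
 */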
  2054. for (i = corner_max[j - 1] + 1; i < corner_max[j]; i++) {
  2055. freq = freq_map[i];
  2056. if (freq_high <= freq)
  2057. continue;
  2058. temp = (freq_high - freq) * (volt_high - volt_low);
  2059. do_div(temp, (u32)(freq_high - freq_low));
  2060. /*
  2061. * max_factor[j] has units of uV/MHz while freq values
  2062. * have units of Hz. Divide by 1000000 to convert.
  2063. */
  2064. temp_limit = (freq_high - freq) * max_factor[j];
  2065. do_div(temp_limit, 1000000);
  2066. cpr_vreg->open_loop_volt[i]
  2067. = volt_high - min(temp, temp_limit);
  2068. cpr_vreg->open_loop_volt[i]
  2069. = DIV_ROUND_UP(cpr_vreg->open_loop_volt[i],
  2070. cpr_vreg->step_volt)
  2071. * cpr_vreg->step_volt;
  2072. }
  2073. }
  2074. kfree(max_factor);
  2075. return 0;
  2076. }
  2077. /*
  2078. * Limit the per-virtual-corner open-loop voltages using the per-virtual-corner
  2079. * ceiling and floor voltage values. This must be called only after the
  2080. * open_loop_volt, ceiling, and floor arrays have all been initialized.
  2081. */
  2082. static int cpr_limit_open_loop_voltage(struct cpr_regulator *cpr_vreg)
  2083. {
  2084. int i;
  2085. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
  2086. if (cpr_vreg->open_loop_volt[i] > cpr_vreg->ceiling_volt[i])
  2087. cpr_vreg->open_loop_volt[i] = cpr_vreg->ceiling_volt[i];
  2088. else if (cpr_vreg->open_loop_volt[i] < cpr_vreg->floor_volt[i])
  2089. cpr_vreg->open_loop_volt[i] = cpr_vreg->floor_volt[i];
  2090. }
  2091. return 0;
  2092. }
  2093. /*
2094. * Fill an OPP table for the cpr-regulator device with
2095. * <virtual voltage corner number, open loop voltage> pairs.
  2096. */
  2097. static int cpr_populate_opp_table(struct cpr_regulator *cpr_vreg,
  2098. struct device *dev)
  2099. {
  2100. int i, rc = 0;
  2101. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
  2102. rc |= dev_pm_opp_add(dev, i, cpr_vreg->open_loop_volt[i]);
  2103. if (rc)
  2104. cpr_debug(cpr_vreg, "could not add OPP entry <%d, %d>, rc=%d\n",
  2105. i, cpr_vreg->open_loop_volt[i], rc);
  2106. }
  2107. if (rc)
  2108. cpr_err(cpr_vreg, "adding OPP entry failed - OPP may not be enabled, rc=%d\n",
  2109. rc);
  2110. return 0;
  2111. }
  2112. /*
  2113. * Conditionally reduce the per-virtual-corner ceiling voltages if certain
  2114. * device tree flags are present. This must be called only after the ceiling
  2115. * array has been initialized and the open_loop_volt array values have been
  2116. * initialized and limited to the existing floor to ceiling voltage range.
  2117. */
  2118. static int cpr_reduce_ceiling_voltage(struct cpr_regulator *cpr_vreg,
  2119. struct device *dev)
  2120. {
  2121. bool reduce_to_fuse_open_loop, reduce_to_interpolated_open_loop;
  2122. int i;
  2123. reduce_to_fuse_open_loop = of_property_read_bool(dev->of_node,
  2124. "qcom,cpr-init-voltage-as-ceiling");
  2125. reduce_to_interpolated_open_loop = of_property_read_bool(dev->of_node,
  2126. "qcom,cpr-scaled-init-voltage-as-ceiling");
  2127. if (!reduce_to_fuse_open_loop && !reduce_to_interpolated_open_loop)
  2128. return 0;
  2129. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
  2130. if (reduce_to_interpolated_open_loop &&
  2131. cpr_vreg->open_loop_volt[i] < cpr_vreg->ceiling_volt[i])
  2132. cpr_vreg->ceiling_volt[i] = cpr_vreg->open_loop_volt[i];
  2133. else if (reduce_to_fuse_open_loop &&
  2134. cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]]
  2135. < cpr_vreg->ceiling_volt[i])
  2136. cpr_vreg->ceiling_volt[i]
  2137. = max((u32)cpr_vreg->floor_volt[i],
  2138. cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]]);
  2139. cpr_debug(cpr_vreg, "lowered ceiling[%d] = %d uV\n",
  2140. i, cpr_vreg->ceiling_volt[i]);
  2141. }
  2142. return 0;
  2143. }
  2144. static int cpr_adjust_target_quot_offsets(struct platform_device *pdev,
  2145. struct cpr_regulator *cpr_vreg)
  2146. {
  2147. struct device_node *of_node = pdev->dev.of_node;
  2148. int tuple_count, tuple_match, i;
  2149. u32 index;
  2150. u32 quot_offset_adjust = 0;
  2151. int len = 0;
  2152. int rc = 0;
  2153. char *quot_offset_str;
  2154. quot_offset_str = "qcom,cpr-quot-offset-adjustment";
  2155. if (!of_find_property(of_node, quot_offset_str, &len)) {
  2156. /* No static quotient adjustment needed. */
  2157. return 0;
  2158. }
  2159. if (cpr_vreg->cpr_fuse_map_count) {
  2160. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
  2161. /* No matching index to use for quotient adjustment. */
  2162. return 0;
  2163. }
  2164. tuple_count = cpr_vreg->cpr_fuse_map_count;
  2165. tuple_match = cpr_vreg->cpr_fuse_map_match;
  2166. } else {
  2167. tuple_count = 1;
  2168. tuple_match = 0;
  2169. }
  2170. if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
  2171. cpr_err(cpr_vreg, "%s length=%d is invalid\n", quot_offset_str,
  2172. len);
  2173. return -EINVAL;
  2174. }
  2175. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  2176. index = tuple_match * cpr_vreg->num_fuse_corners
  2177. + i - CPR_FUSE_CORNER_MIN;
  2178. rc = of_property_read_u32_index(of_node, quot_offset_str, index,
  2179. &quot_offset_adjust);
  2180. if (rc) {
  2181. cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
  2182. quot_offset_str, index, rc);
  2183. return rc;
  2184. }
  2185. if (quot_offset_adjust) {
  2186. cpr_vreg->fuse_quot_offset[i] += quot_offset_adjust;
  2187. cpr_info(cpr_vreg, "Corner[%d]: adjusted target quot = %d\n",
  2188. i, cpr_vreg->fuse_quot_offset[i]);
  2189. }
  2190. }
  2191. return rc;
  2192. }
  2193. static int cpr_get_fuse_quot_offset(struct cpr_regulator *cpr_vreg,
  2194. struct platform_device *pdev,
  2195. struct cpr_quot_scale *quot_scale)
  2196. {
  2197. struct device *dev = &pdev->dev;
  2198. struct property *prop;
  2199. u32 *fuse_sel, *tmp, *offset_multiplier = NULL;
  2200. int rc = 0, i, size, len;
  2201. char *quot_offset_str;
  2202. quot_offset_str = cpr_vreg->cpr_fuse_redundant
  2203. ? "qcom,cpr-fuse-redun-quot-offset"
  2204. : "qcom,cpr-fuse-quot-offset";
  2205. prop = of_find_property(dev->of_node, quot_offset_str, NULL);
  2206. if (!prop) {
  2207. cpr_debug(cpr_vreg, "%s not present\n", quot_offset_str);
  2208. return 0;
  2209. } else {
  2210. size = prop->length / sizeof(u32);
  2211. if (size != cpr_vreg->num_fuse_corners * 4) {
  2212. cpr_err(cpr_vreg, "fuse position for quot offset is invalid\n");
  2213. return -EINVAL;
  2214. }
  2215. }
  2216. fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
  2217. if (!fuse_sel) {
  2218. cpr_err(cpr_vreg, "memory alloc failed.\n");
  2219. return -ENOMEM;
  2220. }
  2221. rc = of_property_read_u32_array(dev->of_node, quot_offset_str,
  2222. fuse_sel, size);
  2223. if (rc < 0) {
  2224. cpr_err(cpr_vreg, "read %s failed, rc = %d\n", quot_offset_str,
  2225. rc);
  2226. kfree(fuse_sel);
  2227. return rc;
  2228. }
  2229. cpr_vreg->fuse_quot_offset = devm_kzalloc(dev,
  2230. sizeof(u32) * (cpr_vreg->num_fuse_corners + 1),
  2231. GFP_KERNEL);
  2232. if (!cpr_vreg->fuse_quot_offset) {
  2233. cpr_err(cpr_vreg, "Can't allocate memory for cpr_vreg->fuse_quot_offset\n");
  2234. kfree(fuse_sel);
  2235. return -ENOMEM;
  2236. }
  2237. if (!of_find_property(dev->of_node,
  2238. "qcom,cpr-fuse-quot-offset-scale", &len)) {
  2239. cpr_debug(cpr_vreg, "qcom,cpr-fuse-quot-offset-scale not present\n");
  2240. } else {
  2241. if (len != cpr_vreg->num_fuse_corners * sizeof(u32)) {
  2242. cpr_err(cpr_vreg, "the size of qcom,cpr-fuse-quot-offset-scale is invalid\n");
  2243. kfree(fuse_sel);
  2244. return -EINVAL;
  2245. }
  2246. offset_multiplier = kzalloc(sizeof(*offset_multiplier)
  2247. * (cpr_vreg->num_fuse_corners + 1),
  2248. GFP_KERNEL);
  2249. if (!offset_multiplier) {
  2250. cpr_err(cpr_vreg, "memory alloc failed.\n");
  2251. kfree(fuse_sel);
  2252. return -ENOMEM;
  2253. }
  2254. rc = of_property_read_u32_array(dev->of_node,
  2255. "qcom,cpr-fuse-quot-offset-scale",
  2256. &offset_multiplier[1],
  2257. cpr_vreg->num_fuse_corners);
  2258. if (rc < 0) {
  2259. cpr_err(cpr_vreg, "read qcom,cpr-fuse-quot-offset-scale failed, rc = %d\n",
  2260. rc);
  2261. kfree(fuse_sel);
  2262. goto out;
  2263. }
  2264. }
  2265. tmp = fuse_sel;
  2266. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  2267. cpr_vreg->fuse_quot_offset[i] = cpr_read_efuse_param(cpr_vreg,
  2268. fuse_sel[0], fuse_sel[1], fuse_sel[2],
  2269. fuse_sel[3]);
  2270. if (offset_multiplier)
  2271. cpr_vreg->fuse_quot_offset[i] *= offset_multiplier[i];
  2272. fuse_sel += 4;
  2273. }
  2274. rc = cpr_adjust_target_quot_offsets(pdev, cpr_vreg);
  2275. kfree(tmp);
  2276. out:
  2277. kfree(offset_multiplier);
  2278. return rc;
  2279. }
  2280. /*
2281. * Adjust the per-virtual-corner open loop voltage with an offset specified by a
  2282. * device-tree property. This must be called after open-loop voltage scaling.
  2283. */
  2284. static int cpr_virtual_corner_voltage_adjust(struct cpr_regulator *cpr_vreg,
  2285. struct device *dev)
  2286. {
  2287. char *prop_name = "qcom,cpr-virtual-corner-init-voltage-adjustment";
  2288. int i, rc, tuple_count, tuple_match, index, len;
  2289. u32 voltage_adjust;
  2290. if (!of_find_property(dev->of_node, prop_name, &len)) {
  2291. cpr_debug(cpr_vreg, "%s not specified\n", prop_name);
  2292. return 0;
  2293. }
  2294. if (cpr_vreg->cpr_fuse_map_count) {
  2295. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
  2296. /* No matching index to use for voltage adjustment. */
  2297. return 0;
  2298. }
  2299. tuple_count = cpr_vreg->cpr_fuse_map_count;
  2300. tuple_match = cpr_vreg->cpr_fuse_map_match;
  2301. } else {
  2302. tuple_count = 1;
  2303. tuple_match = 0;
  2304. }
  2305. if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
  2306. cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_name,
  2307. len);
  2308. return -EINVAL;
  2309. }
  2310. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
  2311. index = tuple_match * cpr_vreg->num_corners
  2312. + i - CPR_CORNER_MIN;
  2313. rc = of_property_read_u32_index(dev->of_node, prop_name,
  2314. index, &voltage_adjust);
  2315. if (rc) {
  2316. cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
  2317. prop_name, index, rc);
  2318. return rc;
  2319. }
  2320. if (voltage_adjust) {
  2321. cpr_vreg->open_loop_volt[i] += (int)voltage_adjust;
  2322. cpr_info(cpr_vreg, "corner=%d adjusted open-loop voltage=%d\n",
  2323. i, cpr_vreg->open_loop_volt[i]);
  2324. }
  2325. }
  2326. return 0;
  2327. }
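/*
 * Hypothetical example of the layout parsed above (values invented): with
 * 4 virtual corners and a two-entry qcom,cpr-fuse-version-map, the property
 * carries one adjustment (in uV, matching open_loop_volt) per virtual corner
 * per map tuple:
 *
 *	qcom,cpr-virtual-corner-init-voltage-adjustment =
 *		<0 0 10000 10000>,
 *		<0 0     0     0>;
 *
 * Only the row selected by cpr_fuse_map_match is applied, so the second row
 * is used when the second fuse-version-map entry matched.
 */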
  2328. /*
2329. * Adjust the per-virtual-corner quotient with an offset specified by a
  2330. * device-tree property. This must be called after the quot-scaling adjustments
  2331. * are completed.
  2332. */
  2333. static int cpr_virtual_corner_quot_adjust(struct cpr_regulator *cpr_vreg,
  2334. struct device *dev)
  2335. {
  2336. char *prop_name = "qcom,cpr-virtual-corner-quotient-adjustment";
  2337. int i, rc, tuple_count, tuple_match, index, len;
  2338. u32 quot_adjust;
  2339. if (!of_find_property(dev->of_node, prop_name, &len)) {
  2340. cpr_debug(cpr_vreg, "%s not specified\n", prop_name);
  2341. return 0;
  2342. }
  2343. if (cpr_vreg->cpr_fuse_map_count) {
  2344. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
  2345. /* No matching index to use for quotient adjustment. */
  2346. return 0;
  2347. }
  2348. tuple_count = cpr_vreg->cpr_fuse_map_count;
  2349. tuple_match = cpr_vreg->cpr_fuse_map_match;
  2350. } else {
  2351. tuple_count = 1;
  2352. tuple_match = 0;
  2353. }
  2354. if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
  2355. cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_name,
  2356. len);
  2357. return -EINVAL;
  2358. }
  2359. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
  2360. index = tuple_match * cpr_vreg->num_corners
  2361. + i - CPR_CORNER_MIN;
  2362. rc = of_property_read_u32_index(dev->of_node, prop_name,
  2363. index, &quot_adjust);
  2364. if (rc) {
  2365. cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
  2366. prop_name, index, rc);
  2367. return rc;
  2368. }
  2369. if (quot_adjust) {
  2370. cpr_vreg->quot_adjust[i] -= (int)quot_adjust;
  2371. cpr_info(cpr_vreg, "corner=%d adjusted quotient=%d\n",
  2372. i,
  2373. cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
  2374. - cpr_vreg->quot_adjust[i]);
  2375. }
  2376. }
  2377. return 0;
  2378. }
  2379. /*
  2380. * cpr_get_corner_quot_adjustment() -- get the quot_adjust for each corner.
  2381. *
  2382. * Get the virtual corner to fuse corner mapping and virtual corner to APC clock
2383. * frequency mapping from device tree.
2384. * Calculate the quotient adjustment scaling factor for those corners mapping
2385. * to all fuse corners except for the lowest one, using linear interpolation.
2386. * Calculate the quotient adjustment for each of these virtual corners using
2387. * the min of the calculated scaling factor and the constant max scaling
2388. * factor defined for each fuse corner in device tree.
  2389. */
  2390. static int cpr_get_corner_quot_adjustment(struct cpr_regulator *cpr_vreg,
  2391. struct device *dev)
  2392. {
  2393. int rc = 0;
  2394. int highest_fuse_corner = cpr_vreg->num_fuse_corners;
  2395. int i, j, size;
  2396. struct property *prop;
  2397. bool corners_mapped, match_found;
  2398. u32 *tmp, *freq_map = NULL;
  2399. u32 corner, freq_corner;
  2400. u32 *freq_max = NULL;
  2401. u32 *scaling = NULL;
  2402. u32 *max_factor = NULL;
  2403. u32 *corner_max = NULL;
  2404. bool maps_valid = false;
  2405. prop = of_find_property(dev->of_node, "qcom,cpr-corner-map", NULL);
  2406. if (prop) {
  2407. size = prop->length / sizeof(u32);
  2408. corners_mapped = true;
  2409. } else {
  2410. size = cpr_vreg->num_fuse_corners;
  2411. corners_mapped = false;
  2412. }
  2413. cpr_vreg->corner_map = devm_kzalloc(dev, sizeof(int) * (size + 1),
  2414. GFP_KERNEL);
  2415. if (!cpr_vreg->corner_map) {
  2416. cpr_err(cpr_vreg,
  2417. "Can't allocate memory for cpr_vreg->corner_map\n");
  2418. return -ENOMEM;
  2419. }
  2420. cpr_vreg->num_corners = size;
  2421. cpr_vreg->quot_adjust = devm_kzalloc(dev,
  2422. sizeof(u32) * (cpr_vreg->num_corners + 1),
  2423. GFP_KERNEL);
  2424. if (!cpr_vreg->quot_adjust) {
  2425. cpr_err(cpr_vreg,
  2426. "Can't allocate memory for cpr_vreg->quot_adjust\n");
  2427. return -ENOMEM;
  2428. }
  2429. if (!corners_mapped) {
  2430. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
  2431. i++)
  2432. cpr_vreg->corner_map[i] = i;
  2433. goto free_arrays;
  2434. } else {
  2435. rc = of_property_read_u32_array(dev->of_node,
  2436. "qcom,cpr-corner-map", &cpr_vreg->corner_map[1], size);
  2437. if (rc) {
  2438. cpr_err(cpr_vreg,
  2439. "qcom,cpr-corner-map missing, rc = %d\n", rc);
  2440. return rc;
  2441. }
  2442. /*
  2443. * Verify that the virtual corner to fuse corner mapping is
  2444. * valid.
  2445. */
  2446. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
  2447. if (cpr_vreg->corner_map[i] > cpr_vreg->num_fuse_corners
  2448. || cpr_vreg->corner_map[i] < CPR_FUSE_CORNER_MIN) {
  2449. cpr_err(cpr_vreg, "qcom,cpr-corner-map contains an element %d which isn't in the allowed range [%d, %d]\n",
  2450. cpr_vreg->corner_map[i],
  2451. CPR_FUSE_CORNER_MIN,
  2452. cpr_vreg->num_fuse_corners);
  2453. return -EINVAL;
  2454. }
  2455. }
  2456. }
  2457. prop = of_find_property(dev->of_node,
  2458. "qcom,cpr-speed-bin-max-corners", NULL);
  2459. if (!prop) {
2460. cpr_debug(cpr_vreg, "qcom,cpr-speed-bin-max-corners missing\n");
  2461. goto free_arrays;
  2462. }
  2463. size = prop->length / sizeof(u32);
  2464. tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
  2465. if (!tmp) {
  2466. cpr_err(cpr_vreg, "memory alloc failed\n");
  2467. return -ENOMEM;
  2468. }
  2469. rc = of_property_read_u32_array(dev->of_node,
  2470. "qcom,cpr-speed-bin-max-corners", tmp, size);
  2471. if (rc < 0) {
  2472. kfree(tmp);
  2473. cpr_err(cpr_vreg,
  2474. "get cpr-speed-bin-max-corners failed, rc = %d\n", rc);
  2475. return rc;
  2476. }
  2477. corner_max = kzalloc((cpr_vreg->num_fuse_corners + 1)
  2478. * sizeof(*corner_max), GFP_KERNEL);
  2479. freq_max = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*freq_max),
  2480. GFP_KERNEL);
  2481. if (corner_max == NULL || freq_max == NULL) {
  2482. cpr_err(cpr_vreg, "Could not allocate memory for quotient scaling arrays\n");
  2483. kfree(tmp);
  2484. rc = -ENOMEM;
  2485. goto free_arrays;
  2486. }
  2487. /*
  2488. * Get the maximum virtual corner for each fuse corner based upon the
  2489. * speed_bin and pvs_version values.
  2490. */
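/*
 * Hypothetical example of the tuple layout matched below, for a part with 3
 * fuse corners (values invented):
 *
 *	qcom,cpr-speed-bin-max-corners =
 *		<0 0xffffffff 2 4 7>,
 *		<1 0xffffffff 2 4 9>;
 *
 * Each tuple is <speed_bin pvs_version max_corner[1] ... max_corner[n]>,
 * where 0xffffffff is assumed to be the FUSE_PARAM_MATCH_ANY wildcard.
 */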
  2491. match_found = false;
  2492. for (i = 0; i < size; i += cpr_vreg->num_fuse_corners + 2) {
  2493. if (tmp[i] != cpr_vreg->speed_bin &&
  2494. tmp[i] != FUSE_PARAM_MATCH_ANY)
  2495. continue;
  2496. if (tmp[i + 1] != cpr_vreg->pvs_version &&
  2497. tmp[i + 1] != FUSE_PARAM_MATCH_ANY)
  2498. continue;
  2499. for (j = CPR_FUSE_CORNER_MIN;
  2500. j <= cpr_vreg->num_fuse_corners; j++)
  2501. corner_max[j] = tmp[i + 2 + j - CPR_FUSE_CORNER_MIN];
  2502. match_found = true;
  2503. break;
  2504. }
  2505. kfree(tmp);
  2506. if (!match_found) {
  2507. cpr_debug(cpr_vreg, "No quotient adjustment possible for speed bin=%u, pvs version=%u\n",
  2508. cpr_vreg->speed_bin, cpr_vreg->pvs_version);
  2509. goto free_arrays;
  2510. }
  2511. /* Verify that fuse corner to max virtual corner mapping is valid. */
  2512. for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++) {
  2513. if (corner_max[i] < CPR_CORNER_MIN
  2514. || corner_max[i] > cpr_vreg->num_corners) {
  2515. cpr_err(cpr_vreg, "Invalid corner=%d in qcom,cpr-speed-bin-max-corners\n",
  2516. corner_max[i]);
  2517. goto free_arrays;
  2518. }
  2519. }
  2520. /*
2521. * Return success even if the virtual corner values read from the
2522. * qcom,cpr-speed-bin-max-corners property are incorrect. This allows
2523. * the driver to continue to run without quotient scaling.
  2524. */
  2525. for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
  2526. if (corner_max[i] <= corner_max[i - 1]) {
2527. cpr_err(cpr_vreg, "max virtual corner of fuse corner=%d (%u) should be larger than that of fuse corner=%d (%u)\n",
  2528. i, corner_max[i], i - 1, corner_max[i - 1]);
  2529. goto free_arrays;
  2530. }
  2531. }
  2532. prop = of_find_property(dev->of_node,
  2533. "qcom,cpr-corner-frequency-map", NULL);
  2534. if (!prop) {
  2535. cpr_debug(cpr_vreg, "qcom,cpr-corner-frequency-map missing\n");
  2536. goto free_arrays;
  2537. }
  2538. size = prop->length / sizeof(u32);
  2539. tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
  2540. if (!tmp) {
  2541. cpr_err(cpr_vreg, "memory alloc failed\n");
  2542. rc = -ENOMEM;
  2543. goto free_arrays;
  2544. }
  2545. rc = of_property_read_u32_array(dev->of_node,
  2546. "qcom,cpr-corner-frequency-map", tmp, size);
  2547. if (rc < 0) {
  2548. cpr_err(cpr_vreg,
  2549. "get cpr-corner-frequency-map failed, rc = %d\n", rc);
  2550. kfree(tmp);
  2551. goto free_arrays;
  2552. }
  2553. freq_map = kzalloc(sizeof(u32) * (cpr_vreg->num_corners + 1),
  2554. GFP_KERNEL);
  2555. if (!freq_map) {
  2556. cpr_err(cpr_vreg, "memory alloc for freq_map failed!\n");
  2557. kfree(tmp);
  2558. rc = -ENOMEM;
  2559. goto free_arrays;
  2560. }
  2561. for (i = 0; i < size; i += 2) {
  2562. corner = tmp[i];
  2563. if ((corner < 1) || (corner > cpr_vreg->num_corners)) {
  2564. cpr_err(cpr_vreg,
  2565. "corner should be in 1~%d range: %d\n",
  2566. cpr_vreg->num_corners, corner);
  2567. continue;
  2568. }
  2569. freq_map[corner] = tmp[i + 1];
  2570. cpr_debug(cpr_vreg,
  2571. "Frequency at virtual corner %d is %d Hz.\n",
  2572. corner, freq_map[corner]);
  2573. }
  2574. kfree(tmp);
  2575. prop = of_find_property(dev->of_node,
  2576. "qcom,cpr-quot-adjust-scaling-factor-max", NULL);
  2577. if (!prop) {
  2578. cpr_debug(cpr_vreg, "qcom,cpr-quot-adjust-scaling-factor-max missing\n");
  2579. rc = 0;
  2580. goto free_arrays;
  2581. }
  2582. size = prop->length / sizeof(u32);
  2583. if ((size != 1) && (size != cpr_vreg->num_fuse_corners)) {
  2584. cpr_err(cpr_vreg, "The size of qcom,cpr-quot-adjust-scaling-factor-max should be 1 or %d\n",
  2585. cpr_vreg->num_fuse_corners);
  2586. rc = 0;
  2587. goto free_arrays;
  2588. }
  2589. max_factor = kzalloc(sizeof(u32) * (cpr_vreg->num_fuse_corners + 1),
  2590. GFP_KERNEL);
  2591. if (!max_factor) {
  2592. cpr_err(cpr_vreg, "Could not allocate memory for max_factor array\n");
  2593. rc = -ENOMEM;
  2594. goto free_arrays;
  2595. }
  2596. /*
  2597. * Leave max_factor[CPR_FUSE_CORNER_MIN ... highest_fuse_corner-1] = 0
  2598. * if cpr-quot-adjust-scaling-factor-max is a single value in order to
  2599. * maintain backward compatibility.
  2600. */
  2601. i = (size == cpr_vreg->num_fuse_corners) ? CPR_FUSE_CORNER_MIN
  2602. : highest_fuse_corner;
  2603. rc = of_property_read_u32_array(dev->of_node,
  2604. "qcom,cpr-quot-adjust-scaling-factor-max",
  2605. &max_factor[i], size);
  2606. if (rc < 0) {
  2607. cpr_debug(cpr_vreg, "could not read qcom,cpr-quot-adjust-scaling-factor-max, rc=%d\n",
  2608. rc);
  2609. rc = 0;
  2610. goto free_arrays;
  2611. }
  2612. /*
  2613. * Get the quotient adjustment scaling factor, according to:
  2614. * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
  2615. * / (freq(corner_N) - freq(corner_N-1)), max_factor)
  2616. *
  2617. * QUOT(corner_N): quotient read from fuse for fuse corner N
  2618. * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
  2619. * freq(corner_N): max frequency in MHz supported by fuse corner N
  2620. * freq(corner_N-1): max frequency in MHz supported by fuse corner
  2621. * (N - 1)
  2622. */
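/*
 * Worked example with invented numbers: if QUOT(corner_N) = 1400,
 * QUOT(corner_N-1) = 1100, freq(corner_N) = 1500 MHz,
 * freq(corner_N-1) = 1200 MHz and max_factor = 650, then
 *
 *	scaling = min(1000 * (1400 - 1100) / (1500 - 1200), 650)
 *		= min(1000, 650) = 650
 *
 * i.e. the factor is the quotient delta per MHz, scaled by 1000.
 */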
  2623. for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
  2624. freq_max[i] = freq_map[corner_max[i]];
  2625. for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
  2626. if (freq_max[i] <= freq_max[i - 1] || freq_max[i - 1] == 0) {
  2627. cpr_err(cpr_vreg, "fuse corner %d freq=%u should be larger than fuse corner %d freq=%u\n",
  2628. i, freq_max[i], i - 1, freq_max[i - 1]);
  2629. rc = -EINVAL;
  2630. goto free_arrays;
  2631. }
  2632. }
  2633. scaling = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*scaling),
  2634. GFP_KERNEL);
  2635. if (!scaling) {
  2636. cpr_err(cpr_vreg, "Could not allocate memory for scaling array\n");
  2637. rc = -ENOMEM;
  2638. goto free_arrays;
  2639. }
  2640. /* Convert corner max frequencies from Hz to MHz. */
  2641. for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
  2642. freq_max[i] /= 1000000;
  2643. for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
  2644. if (cpr_vreg->fuse_quot_offset &&
  2645. (cpr_vreg->cpr_fuse_ro_sel[i] !=
  2646. cpr_vreg->cpr_fuse_ro_sel[i - 1])) {
  2647. scaling[i] = 1000 * cpr_vreg->fuse_quot_offset[i]
  2648. / (freq_max[i] - freq_max[i - 1]);
  2649. } else {
  2650. scaling[i] = 1000 * (cpr_vreg->cpr_fuse_target_quot[i]
  2651. - cpr_vreg->cpr_fuse_target_quot[i - 1])
  2652. / (freq_max[i] - freq_max[i - 1]);
  2653. if (cpr_vreg->cpr_fuse_target_quot[i]
  2654. < cpr_vreg->cpr_fuse_target_quot[i - 1])
  2655. scaling[i] = 0;
  2656. }
  2657. scaling[i] = min(scaling[i], max_factor[i]);
  2658. cpr_info(cpr_vreg, "fuse corner %d quotient adjustment scaling factor: %d.%03d\n",
  2659. i, scaling[i] / 1000, scaling[i] % 1000);
  2660. }
  2661. /*
  2662. * Walk through the virtual corners mapped to each fuse corner
  2663. * and calculate the quotient adjustment for each one using the
  2664. * following formula:
  2665. * quot_adjust = (freq_max - freq_corner) * scaling / 1000
  2666. *
  2667. * @freq_max: max frequency in MHz supported by the fuse corner
  2668. * @freq_corner: frequency in MHz corresponding to the virtual corner
  2669. */
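/*
 * Continuing the invented example above: with scaling = 650, a fuse corner
 * whose freq_max is 1500 MHz and a virtual corner running at 1344 MHz,
 *
 *	quot_adjust = (1500 - 1344) * 650 / 1000 = 101
 *
 * so 101 is later subtracted from that virtual corner's target quotient.
 */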
  2670. for (j = CPR_FUSE_CORNER_MIN + 1; j <= highest_fuse_corner; j++) {
  2671. for (i = corner_max[j - 1] + 1; i < corner_max[j]; i++) {
  2672. freq_corner = freq_map[i] / 1000000; /* MHz */
  2673. if (freq_corner > 0) {
  2674. cpr_vreg->quot_adjust[i] = scaling[j] *
  2675. (freq_max[j] - freq_corner) / 1000;
  2676. }
  2677. }
  2678. }
  2679. rc = cpr_virtual_corner_quot_adjust(cpr_vreg, dev);
  2680. if (rc) {
2681. cpr_err(cpr_vreg, "could not adjust virtual-corner quot, rc=%d\n",
  2682. rc);
  2683. goto free_arrays;
  2684. }
  2685. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
  2686. cpr_info(cpr_vreg, "adjusted quotient[%d] = %d\n", i,
  2687. cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
  2688. - cpr_vreg->quot_adjust[i]);
  2689. maps_valid = true;
  2690. free_arrays:
  2691. if (!rc) {
  2692. rc = cpr_get_open_loop_voltage(cpr_vreg, dev, corner_max,
  2693. freq_map, maps_valid);
  2694. if (rc) {
  2695. cpr_err(cpr_vreg, "could not fill open loop voltage array, rc=%d\n",
  2696. rc);
  2697. goto free_arrays_1;
  2698. }
  2699. rc = cpr_virtual_corner_voltage_adjust(cpr_vreg, dev);
  2700. if (rc)
2701. cpr_err(cpr_vreg, "could not adjust virtual-corner voltage, rc=%d\n",
  2702. rc);
  2703. }
  2704. free_arrays_1:
  2705. kfree(max_factor);
  2706. kfree(scaling);
  2707. kfree(freq_map);
  2708. kfree(corner_max);
  2709. kfree(freq_max);
  2710. return rc;
  2711. }
  2712. /*
  2713. * Check if the redundant set of CPR fuses should be used in place of the
  2714. * primary set and configure the cpr_fuse_redundant element accordingly.
  2715. */
  2716. static int cpr_check_redundant(struct platform_device *pdev,
  2717. struct cpr_regulator *cpr_vreg)
  2718. {
  2719. struct device_node *of_node = pdev->dev.of_node;
  2720. u32 cpr_fuse_redun_sel[5];
  2721. int rc;
  2722. if (of_find_property(of_node, "qcom,cpr-fuse-redun-sel", NULL)) {
  2723. rc = of_property_read_u32_array(of_node,
  2724. "qcom,cpr-fuse-redun-sel", cpr_fuse_redun_sel, 5);
  2725. if (rc < 0) {
  2726. cpr_err(cpr_vreg, "qcom,cpr-fuse-redun-sel missing: rc=%d\n",
  2727. rc);
  2728. return rc;
  2729. }
  2730. cpr_vreg->cpr_fuse_redundant
  2731. = cpr_fuse_is_setting_expected(cpr_vreg,
  2732. cpr_fuse_redun_sel);
  2733. } else {
  2734. cpr_vreg->cpr_fuse_redundant = false;
  2735. }
  2736. if (cpr_vreg->cpr_fuse_redundant)
  2737. cpr_info(cpr_vreg, "using redundant fuse parameters\n");
  2738. return 0;
  2739. }
  2740. static int cpr_read_fuse_revision(struct platform_device *pdev,
  2741. struct cpr_regulator *cpr_vreg)
  2742. {
  2743. struct device_node *of_node = pdev->dev.of_node;
  2744. u32 fuse_sel[4];
  2745. int rc;
  2746. if (of_find_property(of_node, "qcom,cpr-fuse-revision", NULL)) {
  2747. rc = of_property_read_u32_array(of_node,
  2748. "qcom,cpr-fuse-revision", fuse_sel, 4);
  2749. if (rc < 0) {
  2750. cpr_err(cpr_vreg, "qcom,cpr-fuse-revision read failed: rc=%d\n",
  2751. rc);
  2752. return rc;
  2753. }
  2754. cpr_vreg->cpr_fuse_revision
  2755. = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
  2756. fuse_sel[1], fuse_sel[2], fuse_sel[3]);
  2757. cpr_info(cpr_vreg, "fuse revision = %d\n",
  2758. cpr_vreg->cpr_fuse_revision);
  2759. } else {
  2760. cpr_vreg->cpr_fuse_revision = FUSE_REVISION_UNKNOWN;
  2761. }
  2762. return 0;
  2763. }
  2764. static int cpr_read_ro_select(struct platform_device *pdev,
  2765. struct cpr_regulator *cpr_vreg)
  2766. {
  2767. struct device_node *of_node = pdev->dev.of_node;
  2768. int rc = 0;
  2769. u32 cpr_fuse_row[2];
  2770. char *ro_sel_str;
  2771. int *bp_ro_sel;
  2772. int i;
  2773. bp_ro_sel
  2774. = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*bp_ro_sel),
  2775. GFP_KERNEL);
  2776. if (!bp_ro_sel) {
  2777. cpr_err(cpr_vreg, "could not allocate memory for temp array\n");
  2778. return -ENOMEM;
  2779. }
  2780. if (cpr_vreg->cpr_fuse_redundant) {
  2781. rc = of_property_read_u32_array(of_node,
  2782. "qcom,cpr-fuse-redun-row",
  2783. cpr_fuse_row, 2);
  2784. ro_sel_str = "qcom,cpr-fuse-redun-ro-sel";
  2785. } else {
  2786. rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-row",
  2787. cpr_fuse_row, 2);
  2788. ro_sel_str = "qcom,cpr-fuse-ro-sel";
  2789. }
  2790. if (rc)
  2791. goto error;
  2792. rc = of_property_read_u32_array(of_node, ro_sel_str,
  2793. &bp_ro_sel[CPR_FUSE_CORNER_MIN], cpr_vreg->num_fuse_corners);
  2794. if (rc) {
  2795. cpr_err(cpr_vreg, "%s read error, rc=%d\n", ro_sel_str, rc);
  2796. goto error;
  2797. }
  2798. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
  2799. cpr_vreg->cpr_fuse_ro_sel[i]
  2800. = cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
  2801. bp_ro_sel[i], CPR_FUSE_RO_SEL_BITS,
  2802. cpr_fuse_row[1]);
  2803. error:
  2804. kfree(bp_ro_sel);
  2805. return rc;
  2806. }
  2807. static int cpr_find_fuse_map_match(struct platform_device *pdev,
  2808. struct cpr_regulator *cpr_vreg)
  2809. {
  2810. struct device_node *of_node = pdev->dev.of_node;
  2811. int i, j, rc, tuple_size;
  2812. int len = 0;
  2813. u32 *tmp, val, ro;
  2814. /* Specify default no match case. */
  2815. cpr_vreg->cpr_fuse_map_match = FUSE_MAP_NO_MATCH;
  2816. cpr_vreg->cpr_fuse_map_count = 0;
  2817. if (!of_find_property(of_node, "qcom,cpr-fuse-version-map", &len)) {
  2818. /* No mapping present. */
  2819. return 0;
  2820. }
  2821. tuple_size = cpr_vreg->num_fuse_corners + 3;
  2822. cpr_vreg->cpr_fuse_map_count = len / (sizeof(u32) * tuple_size);
  2823. if (len == 0 || len % (sizeof(u32) * tuple_size)) {
  2824. cpr_err(cpr_vreg, "qcom,cpr-fuse-version-map length=%d is invalid\n",
  2825. len);
  2826. return -EINVAL;
  2827. }
  2828. tmp = kzalloc(len, GFP_KERNEL);
  2829. if (!tmp) {
  2830. cpr_err(cpr_vreg, "could not allocate memory for temp array\n");
  2831. return -ENOMEM;
  2832. }
  2833. rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-version-map",
  2834. tmp, cpr_vreg->cpr_fuse_map_count * tuple_size);
  2835. if (rc) {
  2836. cpr_err(cpr_vreg, "could not read qcom,cpr-fuse-version-map, rc=%d\n",
  2837. rc);
  2838. goto done;
  2839. }
  2840. /*
  2841. * qcom,cpr-fuse-version-map tuple format:
  2842. * <speed_bin, pvs_version, cpr_fuse_revision, ro_sel[1], ...,
  2843. * ro_sel[n]> for n == number of fuse corners
  2844. */
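/*
 * Hypothetical example for a 3-fuse-corner part (values invented), where
 * 0xffffffff is assumed to be the FUSE_PARAM_MATCH_ANY wildcard used below:
 *
 *	qcom,cpr-fuse-version-map =
 *		<0 0xffffffff 1 0 0 2>,
 *		<0xffffffff 0xffffffff 0xffffffff 0xffffffff 0xffffffff 0xffffffff>;
 *
 * The first tuple matches speed bin 0, any PVS version, fuse revision 1 and
 * RO selects {0, 0, 2}; the second tuple is a catch-all entry.
 */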
  2845. for (i = 0; i < cpr_vreg->cpr_fuse_map_count; i++) {
  2846. if (tmp[i * tuple_size] != cpr_vreg->speed_bin
  2847. && tmp[i * tuple_size] != FUSE_PARAM_MATCH_ANY)
  2848. continue;
  2849. if (tmp[i * tuple_size + 1] != cpr_vreg->pvs_version
  2850. && tmp[i * tuple_size + 1] != FUSE_PARAM_MATCH_ANY)
  2851. continue;
  2852. if (tmp[i * tuple_size + 2] != cpr_vreg->cpr_fuse_revision
  2853. && tmp[i * tuple_size + 2] != FUSE_PARAM_MATCH_ANY)
  2854. continue;
  2855. for (j = 0; j < cpr_vreg->num_fuse_corners; j++) {
  2856. val = tmp[i * tuple_size + 3 + j];
  2857. ro = cpr_vreg->cpr_fuse_ro_sel[j + CPR_FUSE_CORNER_MIN];
  2858. if (val != ro && val != FUSE_PARAM_MATCH_ANY)
  2859. break;
  2860. }
  2861. if (j == cpr_vreg->num_fuse_corners) {
  2862. cpr_vreg->cpr_fuse_map_match = i;
  2863. break;
  2864. }
  2865. }
  2866. if (cpr_vreg->cpr_fuse_map_match != FUSE_MAP_NO_MATCH)
  2867. cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match found: %d\n",
  2868. cpr_vreg->cpr_fuse_map_match);
  2869. else
  2870. cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match not found\n");
  2871. done:
  2872. kfree(tmp);
  2873. return rc;
  2874. }
  2875. static int cpr_minimum_quot_difference_adjustment(struct platform_device *pdev,
  2876. struct cpr_regulator *cpr_vreg)
  2877. {
  2878. struct device_node *of_node = pdev->dev.of_node;
  2879. int tuple_count, tuple_match;
  2880. int rc, i, len = 0;
  2881. u32 index, adjust_quot = 0;
  2882. u32 *min_diff_quot;
  2883. if (!of_find_property(of_node, "qcom,cpr-fuse-min-quot-diff", NULL))
  2884. /* No conditional adjustment needed on revised quotients. */
  2885. return 0;
  2886. if (!of_find_property(of_node, "qcom,cpr-min-quot-diff-adjustment",
  2887. &len)) {
  2888. cpr_err(cpr_vreg, "qcom,cpr-min-quot-diff-adjustment not specified\n");
  2889. return -ENODEV;
  2890. }
  2891. if (cpr_vreg->cpr_fuse_map_count) {
  2892. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
  2893. /* No matching index to use for quotient adjustment. */
  2894. return 0;
  2895. tuple_count = cpr_vreg->cpr_fuse_map_count;
  2896. tuple_match = cpr_vreg->cpr_fuse_map_match;
  2897. } else {
  2898. tuple_count = 1;
  2899. tuple_match = 0;
  2900. }
  2901. if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
  2902. cpr_err(cpr_vreg, "qcom,cpr-min-quot-diff-adjustment length=%d is invalid\n",
  2903. len);
  2904. return -EINVAL;
  2905. }
  2906. min_diff_quot = kzalloc(cpr_vreg->num_fuse_corners * sizeof(u32),
  2907. GFP_KERNEL);
  2908. if (!min_diff_quot) {
  2909. cpr_err(cpr_vreg, "memory alloc failed\n");
  2910. return -ENOMEM;
  2911. }
  2912. rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-min-quot-diff",
  2913. min_diff_quot,
  2914. cpr_vreg->num_fuse_corners);
  2915. if (rc < 0) {
  2916. cpr_err(cpr_vreg, "qcom,cpr-fuse-min-quot-diff reading failed, rc = %d\n",
  2917. rc);
  2918. goto error;
  2919. }
  2920. for (i = CPR_FUSE_CORNER_MIN + 1;
  2921. i <= cpr_vreg->num_fuse_corners; i++) {
  2922. if ((cpr_vreg->cpr_fuse_target_quot[i]
  2923. - cpr_vreg->cpr_fuse_target_quot[i - 1])
  2924. <= (int)min_diff_quot[i - CPR_FUSE_CORNER_MIN]) {
  2925. index = tuple_match * cpr_vreg->num_fuse_corners
  2926. + i - CPR_FUSE_CORNER_MIN;
  2927. rc = of_property_read_u32_index(of_node,
  2928. "qcom,cpr-min-quot-diff-adjustment",
  2929. index, &adjust_quot);
  2930. if (rc) {
  2931. cpr_err(cpr_vreg, "could not read qcom,cpr-min-quot-diff-adjustment index %u, rc=%d\n",
  2932. index, rc);
  2933. goto error;
  2934. }
  2935. cpr_vreg->cpr_fuse_target_quot[i]
  2936. = cpr_vreg->cpr_fuse_target_quot[i - 1]
  2937. + adjust_quot;
  2938. cpr_info(cpr_vreg, "Corner[%d]: revised adjusted quotient = %d\n",
  2939. i, cpr_vreg->cpr_fuse_target_quot[i]);
2940. }
  2941. }
  2942. error:
  2943. kfree(min_diff_quot);
  2944. return rc;
  2945. }
  2946. static int cpr_adjust_target_quots(struct platform_device *pdev,
  2947. struct cpr_regulator *cpr_vreg)
  2948. {
  2949. struct device_node *of_node = pdev->dev.of_node;
  2950. int tuple_count, tuple_match, i;
  2951. u32 index;
  2952. u32 quot_adjust = 0;
  2953. int len = 0;
  2954. int rc = 0;
  2955. if (!of_find_property(of_node, "qcom,cpr-quotient-adjustment", &len)) {
  2956. /* No static quotient adjustment needed. */
  2957. return 0;
  2958. }
  2959. if (cpr_vreg->cpr_fuse_map_count) {
  2960. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
  2961. /* No matching index to use for quotient adjustment. */
  2962. return 0;
  2963. }
  2964. tuple_count = cpr_vreg->cpr_fuse_map_count;
  2965. tuple_match = cpr_vreg->cpr_fuse_map_match;
  2966. } else {
  2967. tuple_count = 1;
  2968. tuple_match = 0;
  2969. }
  2970. if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
  2971. cpr_err(cpr_vreg, "qcom,cpr-quotient-adjustment length=%d is invalid\n",
  2972. len);
  2973. return -EINVAL;
  2974. }
  2975. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  2976. index = tuple_match * cpr_vreg->num_fuse_corners
  2977. + i - CPR_FUSE_CORNER_MIN;
  2978. rc = of_property_read_u32_index(of_node,
  2979. "qcom,cpr-quotient-adjustment", index, &quot_adjust);
  2980. if (rc) {
  2981. cpr_err(cpr_vreg, "could not read qcom,cpr-quotient-adjustment index %u, rc=%d\n",
  2982. index, rc);
  2983. return rc;
  2984. }
  2985. if (quot_adjust) {
  2986. cpr_vreg->cpr_fuse_target_quot[i] += quot_adjust;
  2987. cpr_info(cpr_vreg, "Corner[%d]: adjusted target quot = %d\n",
  2988. i, cpr_vreg->cpr_fuse_target_quot[i]);
  2989. }
  2990. }
  2991. rc = cpr_minimum_quot_difference_adjustment(pdev, cpr_vreg);
  2992. if (rc)
  2993. cpr_err(cpr_vreg, "failed to apply minimum quot difference rc=%d\n",
  2994. rc);
  2995. return rc;
  2996. }
  2997. static int cpr_check_allowed(struct platform_device *pdev,
  2998. struct cpr_regulator *cpr_vreg)
  2999. {
  3000. struct device_node *of_node = pdev->dev.of_node;
  3001. char *allow_str = "qcom,cpr-allowed";
  3002. int rc = 0, count;
  3003. int tuple_count, tuple_match;
  3004. u32 allow_status;
  3005. if (!of_find_property(of_node, allow_str, &count))
  3006. /* CPR is allowed for all fuse revisions. */
  3007. return 0;
  3008. count /= sizeof(u32);
  3009. if (cpr_vreg->cpr_fuse_map_count) {
  3010. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
  3011. /* No matching index to use for CPR allowed. */
  3012. return 0;
  3013. tuple_count = cpr_vreg->cpr_fuse_map_count;
  3014. tuple_match = cpr_vreg->cpr_fuse_map_match;
  3015. } else {
  3016. tuple_count = 1;
  3017. tuple_match = 0;
  3018. }
  3019. if (count != tuple_count) {
  3020. cpr_err(cpr_vreg, "%s count=%d is invalid\n", allow_str,
  3021. count);
  3022. return -EINVAL;
  3023. }
  3024. rc = of_property_read_u32_index(of_node, allow_str, tuple_match,
  3025. &allow_status);
  3026. if (rc) {
  3027. cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
  3028. allow_str, tuple_match, rc);
  3029. return rc;
  3030. }
  3031. if (allow_status && !cpr_vreg->cpr_fuse_disable)
  3032. cpr_vreg->cpr_fuse_disable = false;
  3033. else
  3034. cpr_vreg->cpr_fuse_disable = true;
  3035. cpr_info(cpr_vreg, "CPR closed loop is %s for fuse revision %d\n",
  3036. cpr_vreg->cpr_fuse_disable ? "disabled" : "enabled",
  3037. cpr_vreg->cpr_fuse_revision);
  3038. return rc;
  3039. }
  3040. static int cpr_check_de_aging_allowed(struct cpr_regulator *cpr_vreg,
  3041. struct device *dev)
  3042. {
  3043. struct device_node *of_node = dev->of_node;
  3044. char *allow_str = "qcom,cpr-de-aging-allowed";
  3045. int rc = 0, count;
  3046. int tuple_count, tuple_match;
  3047. u32 allow_status = 0;
  3048. if (!of_find_property(of_node, allow_str, &count)) {
3049. /* CPR de-aging is not allowed for any fuse revision. */
  3050. return allow_status;
  3051. }
  3052. count /= sizeof(u32);
  3053. if (cpr_vreg->cpr_fuse_map_count) {
  3054. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
  3055. /* No matching index to use for CPR de-aging allowed. */
  3056. return 0;
  3057. tuple_count = cpr_vreg->cpr_fuse_map_count;
  3058. tuple_match = cpr_vreg->cpr_fuse_map_match;
  3059. } else {
  3060. tuple_count = 1;
  3061. tuple_match = 0;
  3062. }
  3063. if (count != tuple_count) {
  3064. cpr_err(cpr_vreg, "%s count=%d is invalid\n", allow_str,
  3065. count);
  3066. return -EINVAL;
  3067. }
  3068. rc = of_property_read_u32_index(of_node, allow_str, tuple_match,
  3069. &allow_status);
  3070. if (rc) {
  3071. cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
  3072. allow_str, tuple_match, rc);
  3073. return rc;
  3074. }
  3075. cpr_info(cpr_vreg, "CPR de-aging is %s for fuse revision %d\n",
  3076. allow_status ? "allowed" : "not allowed",
  3077. cpr_vreg->cpr_fuse_revision);
  3078. return allow_status;
  3079. }
  3080. static int cpr_aging_init(struct platform_device *pdev,
  3081. struct cpr_regulator *cpr_vreg)
  3082. {
  3083. struct device_node *of_node = pdev->dev.of_node;
  3084. struct cpr_aging_info *aging_info;
  3085. struct cpr_aging_sensor_info *sensor_info;
  3086. int num_fuse_corners = cpr_vreg->num_fuse_corners;
  3087. int i, rc = 0, len = 0, num_aging_sensors, ro_sel, bits;
  3088. u32 *aging_sensor_id, *fuse_sel, *fuse_sel_orig;
  3089. u32 sensor = 0, non_collapsible_sensor_mask = 0;
  3090. u64 efuse_val;
  3091. struct property *prop;
  3092. if (!of_find_property(of_node, "qcom,cpr-aging-sensor-id", &len)) {
  3093. /* No CPR de-aging adjustments needed */
  3094. return 0;
  3095. }
  3096. if (len == 0) {
  3097. cpr_err(cpr_vreg, "qcom,cpr-aging-sensor-id property format is invalid\n");
  3098. return -EINVAL;
  3099. }
  3100. num_aging_sensors = len / sizeof(u32);
  3101. cpr_debug(cpr_vreg, "No of aging sensors = %d\n", num_aging_sensors);
  3102. if (cpumask_empty(&cpr_vreg->cpu_mask)) {
  3103. cpr_err(cpr_vreg, "qcom,cpr-cpus property missing\n");
  3104. return -EINVAL;
  3105. }
  3106. rc = cpr_check_de_aging_allowed(cpr_vreg, &pdev->dev);
  3107. if (rc < 0) {
  3108. cpr_err(cpr_vreg, "cpr_check_de_aging_allowed failed: rc=%d\n",
  3109. rc);
  3110. return rc;
  3111. } else if (rc == 0) {
  3112. /* CPR de-aging is not allowed for the current fuse combo */
  3113. return 0;
  3114. }
  3115. aging_info = devm_kzalloc(&pdev->dev, sizeof(*aging_info),
  3116. GFP_KERNEL);
  3117. if (!aging_info)
  3118. return -ENOMEM;
  3119. cpr_vreg->aging_info = aging_info;
  3120. aging_info->num_aging_sensors = num_aging_sensors;
  3121. rc = of_property_read_u32(of_node, "qcom,cpr-aging-ref-corner",
  3122. &aging_info->aging_corner);
  3123. if (rc) {
  3124. cpr_err(cpr_vreg, "qcom,cpr-aging-ref-corner missing rc=%d\n",
  3125. rc);
  3126. return rc;
  3127. }
  3128. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-aging-ref-voltage",
  3129. &aging_info->aging_ref_voltage, rc);
  3130. if (rc)
  3131. return rc;
  3132. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-max-aging-margin",
  3133. &aging_info->max_aging_margin, rc);
  3134. if (rc)
  3135. return rc;
  3136. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-aging-ro-scaling-factor",
  3137. &aging_info->aging_ro_kv, rc);
  3138. if (rc)
  3139. return rc;
  3140. /* Check for DIV by 0 error */
  3141. if (aging_info->aging_ro_kv == 0) {
  3142. cpr_err(cpr_vreg, "invalid cpr-aging-ro-scaling-factor value: %u\n",
  3143. aging_info->aging_ro_kv);
  3144. return -EINVAL;
  3145. }
  3146. rc = of_property_read_u32_array(of_node, "qcom,cpr-ro-scaling-factor",
  3147. aging_info->cpr_ro_kv, CPR_NUM_RING_OSC);
  3148. if (rc) {
  3149. cpr_err(cpr_vreg, "qcom,cpr-ro-scaling-factor property read failed, rc = %d\n",
  3150. rc);
  3151. return rc;
  3152. }
  3153. if (of_find_property(of_node, "qcom,cpr-non-collapsible-sensors",
  3154. &len)) {
  3155. len = len / sizeof(u32);
  3156. if (len <= 0 || len > 32) {
  3157. cpr_err(cpr_vreg, "qcom,cpr-non-collapsible-sensors has an incorrect size\n");
  3158. return -EINVAL;
  3159. }
  3160. for (i = 0; i < len; i++) {
  3161. rc = of_property_read_u32_index(of_node,
  3162. "qcom,cpr-non-collapsible-sensors",
  3163. i, &sensor);
  3164. if (rc) {
  3165. cpr_err(cpr_vreg, "could not read qcom,cpr-non-collapsible-sensors index %u, rc=%d\n",
  3166. i, rc);
  3167. return rc;
  3168. }
  3169. if (sensor > 31) {
  3170. cpr_err(cpr_vreg, "invalid non-collapsible sensor = %u\n",
  3171. sensor);
  3172. return -EINVAL;
  3173. }
  3174. non_collapsible_sensor_mask |= BIT(sensor);
  3175. }
  3176. /*
3177. * Bypass the sensors in the collapsible domain for
3178. * de-aging measurements.
  3179. */
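/*
 * For example (sensor list invented): if the non-collapsible sensors are
 * {0, 1, 5}, then non_collapsible_sensor_mask = 0x00000023 and the bypass
 * mask programmed for the aging measurement becomes ~0x23 = 0xffffffdc.
 */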
  3180. aging_info->aging_sensor_bypass =
  3181. ~(non_collapsible_sensor_mask);
  3182. cpr_debug(cpr_vreg, "sensor bypass mask for aging = 0x%08x\n",
  3183. aging_info->aging_sensor_bypass);
  3184. }
  3185. prop = of_find_property(pdev->dev.of_node, "qcom,cpr-aging-derate",
  3186. NULL);
  3187. if ((!prop) ||
  3188. (prop->length != num_fuse_corners * sizeof(u32))) {
  3189. cpr_err(cpr_vreg, "qcom,cpr-aging-derate incorrectly configured\n");
  3190. return -EINVAL;
  3191. }
  3192. aging_sensor_id = kcalloc(num_aging_sensors, sizeof(*aging_sensor_id),
  3193. GFP_KERNEL);
  3194. fuse_sel = kcalloc(num_aging_sensors * 4, sizeof(*fuse_sel),
  3195. GFP_KERNEL);
  3196. aging_info->voltage_adjust = devm_kcalloc(&pdev->dev,
  3197. num_fuse_corners + 1,
  3198. sizeof(*aging_info->voltage_adjust),
  3199. GFP_KERNEL);
  3200. aging_info->sensor_info = devm_kcalloc(&pdev->dev, num_aging_sensors,
  3201. sizeof(*aging_info->sensor_info),
  3202. GFP_KERNEL);
  3203. aging_info->aging_derate = devm_kcalloc(&pdev->dev,
  3204. num_fuse_corners + 1,
  3205. sizeof(*aging_info->aging_derate),
  3206. GFP_KERNEL);
3207. if (!aging_info->aging_derate || !aging_sensor_id
3208. || !aging_info->sensor_info || !fuse_sel
3209. || !aging_info->voltage_adjust) {
3210. rc = -ENOMEM; /* do not return the stale rc (0) on allocation failure */
goto err;
}
  3211. rc = of_property_read_u32_array(of_node, "qcom,cpr-aging-sensor-id",
  3212. aging_sensor_id, num_aging_sensors);
  3213. if (rc) {
  3214. cpr_err(cpr_vreg, "qcom,cpr-aging-sensor-id property read failed, rc = %d\n",
  3215. rc);
  3216. goto err;
  3217. }
  3218. for (i = 0; i < num_aging_sensors; i++)
3219. if (aging_sensor_id[i] > 31) { /* aging_sensor_id entries are u32, so only the upper bound can fail */
  3220. cpr_err(cpr_vreg, "Invalid aging sensor id: %u\n",
  3221. aging_sensor_id[i]);
  3222. rc = -EINVAL;
  3223. goto err;
  3224. }
  3225. rc = of_property_read_u32_array(of_node, "qcom,cpr-aging-derate",
  3226. &aging_info->aging_derate[CPR_FUSE_CORNER_MIN],
  3227. num_fuse_corners);
  3228. if (rc) {
  3229. cpr_err(cpr_vreg, "qcom,cpr-aging-derate property read failed, rc = %d\n",
  3230. rc);
  3231. goto err;
  3232. }
  3233. rc = of_property_read_u32_array(of_node,
  3234. "qcom,cpr-fuse-aging-init-quot-diff",
  3235. fuse_sel, (num_aging_sensors * 4));
  3236. if (rc) {
  3237. cpr_err(cpr_vreg, "qcom,cpr-fuse-aging-init-quot-diff read failed, rc = %d\n",
  3238. rc);
  3239. goto err;
  3240. }
  3241. fuse_sel_orig = fuse_sel;
  3242. sensor_info = aging_info->sensor_info;
  3243. for (i = 0; i < num_aging_sensors; i++, sensor_info++) {
  3244. sensor_info->sensor_id = aging_sensor_id[i];
  3245. efuse_val = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
  3246. fuse_sel[1], fuse_sel[2], fuse_sel[3]);
  3247. bits = fuse_sel[2];
  3248. sensor_info->initial_quot_diff = ((efuse_val & BIT(bits - 1)) ?
  3249. -1 : 1) * (efuse_val & (BIT(bits - 1) - 1));
  3250. cpr_debug(cpr_vreg, "Age sensor[%d] Initial quot diff = %d\n",
  3251. sensor_info->sensor_id,
  3252. sensor_info->initial_quot_diff);
  3253. fuse_sel += 4;
  3254. }
  3255. /*
3256. * Add the max aging margin here. It can be adjusted later by the
3257. * de-aging algorithm.
  3258. */
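/*
 * Worked example with invented numbers, assuming the margin is in uV as the
 * voltage_adjust usage suggests: for cpr_ro_kv[ro_sel] = 3000 and
 * max_aging_margin = 10000, the quotient bump below is
 * 3000 * 10000 / 1000000 = 30, and voltage_adjust[i] starts out as the full
 * 10000 uV margin until the de-aging measurement trims it.
 */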
  3259. for (i = CPR_FUSE_CORNER_MIN; i <= num_fuse_corners; i++) {
  3260. ro_sel = cpr_vreg->cpr_fuse_ro_sel[i];
  3261. cpr_vreg->cpr_fuse_target_quot[i] +=
  3262. (aging_info->cpr_ro_kv[ro_sel]
  3263. * aging_info->max_aging_margin) / 1000000;
  3264. aging_info->voltage_adjust[i] = aging_info->max_aging_margin;
  3265. cpr_info(cpr_vreg, "Corner[%d]: age margin adjusted quotient = %d\n",
  3266. i, cpr_vreg->cpr_fuse_target_quot[i]);
  3267. }
3268. fuse_sel = fuse_sel_orig; /* restore before the common cleanup below */
3269. err:
3270. kfree(fuse_sel); /* NULL-safe; also frees the array on early error paths */
3271. kfree(aging_sensor_id);
return rc;
  3272. }
  3273. static int cpr_cpu_map_init(struct cpr_regulator *cpr_vreg, struct device *dev)
  3274. {
  3275. struct device_node *cpu_node;
  3276. int i, cpu;
  3277. if (!of_find_property(dev->of_node, "qcom,cpr-cpus",
  3278. &cpr_vreg->num_adj_cpus)) {
  3279. /* No adjustments based on online cores */
  3280. return 0;
  3281. }
  3282. cpr_vreg->num_adj_cpus /= sizeof(u32);
  3283. cpr_vreg->adj_cpus = devm_kcalloc(dev, cpr_vreg->num_adj_cpus,
  3284. sizeof(int), GFP_KERNEL);
  3285. if (!cpr_vreg->adj_cpus)
  3286. return -ENOMEM;
  3287. for (i = 0; i < cpr_vreg->num_adj_cpus; i++) {
  3288. cpu_node = of_parse_phandle(dev->of_node, "qcom,cpr-cpus", i);
  3289. if (!cpu_node) {
  3290. cpr_err(cpr_vreg, "could not find CPU node %d\n", i);
  3291. return -EINVAL;
  3292. }
  3293. cpr_vreg->adj_cpus[i] = -1;
  3294. for_each_possible_cpu(cpu) {
  3295. if (of_get_cpu_node(cpu, NULL) == cpu_node) {
  3296. cpr_vreg->adj_cpus[i] = cpu;
  3297. cpumask_set_cpu(cpu, &cpr_vreg->cpu_mask);
  3298. break;
  3299. }
  3300. }
  3301. of_node_put(cpu_node);
  3302. }
  3303. return 0;
  3304. }
  3305. static int cpr_init_cpr_efuse(struct platform_device *pdev,
  3306. struct cpr_regulator *cpr_vreg)
  3307. {
  3308. struct device_node *of_node = pdev->dev.of_node;
  3309. int i, rc = 0;
  3310. bool scheme_fuse_valid = false;
  3311. bool disable_fuse_valid = false;
  3312. char *targ_quot_str;
  3313. u32 cpr_fuse_row[2];
  3314. u32 bp_cpr_disable, bp_scheme;
  3315. size_t len;
  3316. int *bp_target_quot;
  3317. u64 fuse_bits, fuse_bits_2;
  3318. u32 *target_quot_size;
  3319. struct cpr_quot_scale *quot_scale;
  3320. len = cpr_vreg->num_fuse_corners + 1;
  3321. bp_target_quot = kzalloc(len * sizeof(*bp_target_quot), GFP_KERNEL);
  3322. target_quot_size = kzalloc(len * sizeof(*target_quot_size), GFP_KERNEL);
  3323. quot_scale = kzalloc(len * sizeof(*quot_scale), GFP_KERNEL);
  3324. if (!bp_target_quot || !target_quot_size || !quot_scale) {
  3325. cpr_err(cpr_vreg,
  3326. "Could not allocate memory for fuse parsing arrays\n");
  3327. rc = -ENOMEM;
  3328. goto error;
  3329. }
  3330. if (cpr_vreg->cpr_fuse_redundant) {
  3331. rc = of_property_read_u32_array(of_node,
  3332. "qcom,cpr-fuse-redun-row",
  3333. cpr_fuse_row, 2);
  3334. targ_quot_str = "qcom,cpr-fuse-redun-target-quot";
  3335. } else {
  3336. rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-row",
  3337. cpr_fuse_row, 2);
  3338. targ_quot_str = "qcom,cpr-fuse-target-quot";
  3339. }
  3340. if (rc)
  3341. goto error;
  3342. rc = of_property_read_u32_array(of_node, targ_quot_str,
  3343. &bp_target_quot[CPR_FUSE_CORNER_MIN],
  3344. cpr_vreg->num_fuse_corners);
  3345. if (rc < 0) {
  3346. cpr_err(cpr_vreg, "missing %s: rc=%d\n", targ_quot_str, rc);
  3347. goto error;
  3348. }
  3349. if (of_find_property(of_node, "qcom,cpr-fuse-target-quot-size", NULL)) {
  3350. rc = of_property_read_u32_array(of_node,
  3351. "qcom,cpr-fuse-target-quot-size",
  3352. &target_quot_size[CPR_FUSE_CORNER_MIN],
  3353. cpr_vreg->num_fuse_corners);
  3354. if (rc < 0) {
  3355. cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-size: rc=%d\n",
  3356. rc);
  3357. goto error;
  3358. }
  3359. } else {
  3360. /*
  3361. * Default fuse quotient parameter size to match target register
  3362. * size.
  3363. */
  3364. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
  3365. i++)
  3366. target_quot_size[i] = CPR_FUSE_TARGET_QUOT_BITS;
  3367. }
  3368. if (of_find_property(of_node, "qcom,cpr-fuse-target-quot-scale",
  3369. NULL)) {
  3370. for (i = 0; i < cpr_vreg->num_fuse_corners; i++) {
  3371. rc = of_property_read_u32_index(of_node,
  3372. "qcom,cpr-fuse-target-quot-scale", i * 2,
  3373. &quot_scale[i + CPR_FUSE_CORNER_MIN].offset);
  3374. if (rc < 0) {
  3375. cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-scale: rc=%d\n",
  3376. rc);
  3377. goto error;
  3378. }
  3379. rc = of_property_read_u32_index(of_node,
  3380. "qcom,cpr-fuse-target-quot-scale", i * 2 + 1,
  3381. &quot_scale[i + CPR_FUSE_CORNER_MIN].multiplier);
  3382. if (rc < 0) {
  3383. cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-scale: rc=%d\n",
  3384. rc);
  3385. goto error;
  3386. }
  3387. }
  3388. } else {
  3389. /*
  3390. * In the default case, target quotients require no scaling so
  3391. * use offset = 0, multiplier = 1.
  3392. */
  3393. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
  3394. i++) {
  3395. quot_scale[i].offset = 0;
  3396. quot_scale[i].multiplier = 1;
  3397. }
  3398. }
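/*
 * Illustrative unpacking example (invented values): with
 * qcom,cpr-fuse-target-quot-scale = <0 1>, <100 10>, <100 10> on a
 * 3-fuse-corner part, a raw fused value of 75 for fuse corner 2 becomes
 * 75 * 10 + 100 = 850 when it is unpacked in the loop further below.
 */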
  3399. /* Read the control bits of eFuse */
  3400. fuse_bits = cpr_read_efuse_row(cpr_vreg, cpr_fuse_row[0],
  3401. cpr_fuse_row[1]);
  3402. cpr_info(cpr_vreg, "[row:%d] = 0x%llx\n", cpr_fuse_row[0], fuse_bits);
  3403. if (cpr_vreg->cpr_fuse_redundant) {
  3404. if (of_find_property(of_node,
  3405. "qcom,cpr-fuse-redun-bp-cpr-disable", NULL)) {
  3406. CPR_PROP_READ_U32(cpr_vreg, of_node,
  3407. "cpr-fuse-redun-bp-cpr-disable",
  3408. &bp_cpr_disable, rc);
  3409. disable_fuse_valid = true;
  3410. if (of_find_property(of_node,
  3411. "qcom,cpr-fuse-redun-bp-scheme",
  3412. NULL)) {
  3413. CPR_PROP_READ_U32(cpr_vreg, of_node,
  3414. "cpr-fuse-redun-bp-scheme",
  3415. &bp_scheme, rc);
  3416. scheme_fuse_valid = true;
  3417. }
  3418. if (rc)
  3419. goto error;
  3420. fuse_bits_2 = fuse_bits;
  3421. } else {
  3422. u32 temp_row[2];
  3423. /* Use original fuse if no optional property */
  3424. if (of_find_property(of_node,
  3425. "qcom,cpr-fuse-bp-cpr-disable", NULL)) {
  3426. CPR_PROP_READ_U32(cpr_vreg, of_node,
  3427. "cpr-fuse-bp-cpr-disable",
  3428. &bp_cpr_disable, rc);
  3429. disable_fuse_valid = true;
  3430. }
  3431. if (of_find_property(of_node,
  3432. "qcom,cpr-fuse-bp-scheme",
  3433. NULL)) {
  3434. CPR_PROP_READ_U32(cpr_vreg, of_node,
  3435. "cpr-fuse-bp-scheme",
  3436. &bp_scheme, rc);
  3437. scheme_fuse_valid = true;
  3438. }
  3439. rc = of_property_read_u32_array(of_node,
  3440. "qcom,cpr-fuse-row",
  3441. temp_row, 2);
  3442. if (rc)
  3443. goto error;
  3444. fuse_bits_2 = cpr_read_efuse_row(cpr_vreg, temp_row[0],
  3445. temp_row[1]);
  3446. cpr_info(cpr_vreg, "[original row:%d] = 0x%llx\n",
  3447. temp_row[0], fuse_bits_2);
  3448. }
  3449. } else {
  3450. if (of_find_property(of_node, "qcom,cpr-fuse-bp-cpr-disable",
  3451. NULL)) {
  3452. CPR_PROP_READ_U32(cpr_vreg, of_node,
  3453. "cpr-fuse-bp-cpr-disable", &bp_cpr_disable, rc);
  3454. disable_fuse_valid = true;
  3455. }
  3456. if (of_find_property(of_node, "qcom,cpr-fuse-bp-scheme",
  3457. NULL)) {
  3458. CPR_PROP_READ_U32(cpr_vreg, of_node,
  3459. "cpr-fuse-bp-scheme", &bp_scheme, rc);
  3460. scheme_fuse_valid = true;
  3461. }
  3462. if (rc)
  3463. goto error;
  3464. fuse_bits_2 = fuse_bits;
  3465. }
  3466. if (disable_fuse_valid) {
  3467. cpr_vreg->cpr_fuse_disable =
  3468. (fuse_bits_2 >> bp_cpr_disable) & 0x01;
  3469. cpr_info(cpr_vreg, "CPR disable fuse = %d\n",
  3470. cpr_vreg->cpr_fuse_disable);
  3471. } else {
  3472. cpr_vreg->cpr_fuse_disable = false;
  3473. }
  3474. if (scheme_fuse_valid) {
  3475. cpr_vreg->cpr_fuse_local = (fuse_bits_2 >> bp_scheme) & 0x01;
  3476. cpr_info(cpr_vreg, "local = %d\n", cpr_vreg->cpr_fuse_local);
  3477. } else {
  3478. cpr_vreg->cpr_fuse_local = true;
  3479. }
  3480. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  3481. cpr_vreg->cpr_fuse_target_quot[i]
  3482. = cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
  3483. bp_target_quot[i], target_quot_size[i],
  3484. cpr_fuse_row[1]);
  3485. /* Unpack the target quotient by scaling. */
  3486. cpr_vreg->cpr_fuse_target_quot[i] *= quot_scale[i].multiplier;
  3487. cpr_vreg->cpr_fuse_target_quot[i] += quot_scale[i].offset;
  3488. cpr_info(cpr_vreg,
  3489. "Corner[%d]: ro_sel = %d, target quot = %d\n", i,
  3490. cpr_vreg->cpr_fuse_ro_sel[i],
  3491. cpr_vreg->cpr_fuse_target_quot[i]);
  3492. }
  3493. rc = cpr_cpu_map_init(cpr_vreg, &pdev->dev);
  3494. if (rc) {
  3495. cpr_err(cpr_vreg, "CPR cpu map init failed: rc=%d\n", rc);
  3496. goto error;
  3497. }
  3498. rc = cpr_aging_init(pdev, cpr_vreg);
  3499. if (rc) {
  3500. cpr_err(cpr_vreg, "CPR aging init failed: rc=%d\n", rc);
  3501. goto error;
  3502. }
  3503. rc = cpr_adjust_target_quots(pdev, cpr_vreg);
  3504. if (rc)
  3505. goto error;
  3506. for (i = CPR_FUSE_CORNER_MIN + 1;
  3507. i <= cpr_vreg->num_fuse_corners; i++) {
  3508. if (cpr_vreg->cpr_fuse_target_quot[i]
  3509. < cpr_vreg->cpr_fuse_target_quot[i - 1] &&
  3510. cpr_vreg->cpr_fuse_ro_sel[i] ==
  3511. cpr_vreg->cpr_fuse_ro_sel[i - 1]) {
  3512. cpr_vreg->cpr_fuse_disable = true;
  3513. cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
  3514. }
  3515. }
  3516. if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
  3517. cpr_voltage_uplift_wa_inc_quot(cpr_vreg, of_node);
  3518. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
  3519. i++) {
  3520. cpr_info(cpr_vreg,
  3521. "Corner[%d]: uplifted target quot = %d\n",
  3522. i, cpr_vreg->cpr_fuse_target_quot[i]);
  3523. }
  3524. }
  3525. /*
  3526. * Check whether the fuse-quot-offset is defined per fuse corner.
  3527. * If it is defined, use it (quot_offset) in the calculation
  3528. * below for obtaining scaling factor per fuse corner.
  3529. */
  3530. rc = cpr_get_fuse_quot_offset(cpr_vreg, pdev, quot_scale);
  3531. if (rc < 0)
  3532. goto error;
  3533. rc = cpr_get_corner_quot_adjustment(cpr_vreg, &pdev->dev);
  3534. if (rc)
  3535. goto error;
  3536. cpr_vreg->cpr_fuse_bits = fuse_bits;
  3537. if (!cpr_vreg->cpr_fuse_bits) {
  3538. cpr_vreg->cpr_fuse_disable = true;
  3539. cpr_err(cpr_vreg,
  3540. "cpr_fuse_bits == 0; permanently disabling CPR\n");
  3541. } else if (!cpr_vreg->fuse_quot_offset) {
  3542. /*
  3543. * Check if the target quotients for the highest two fuse
  3544. * corners are too close together.
  3545. */
  3546. int *quot = cpr_vreg->cpr_fuse_target_quot;
  3547. int highest_fuse_corner = cpr_vreg->num_fuse_corners;
  3548. u32 min_diff_quot;
  3549. bool valid_fuse = true;
  3550. min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF;
  3551. of_property_read_u32(of_node, "qcom,cpr-quot-min-diff",
  3552. &min_diff_quot);
  3553. if (quot[highest_fuse_corner] > quot[highest_fuse_corner - 1]) {
  3554. if ((quot[highest_fuse_corner]
  3555. - quot[highest_fuse_corner - 1])
  3556. <= min_diff_quot)
  3557. valid_fuse = false;
  3558. } else {
  3559. valid_fuse = false;
  3560. }
  3561. if (!valid_fuse) {
  3562. cpr_vreg->cpr_fuse_disable = true;
  3563. cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
  3564. }
  3565. }
  3566. rc = cpr_check_allowed(pdev, cpr_vreg);
  3567. error:
  3568. kfree(bp_target_quot);
  3569. kfree(target_quot_size);
  3570. kfree(quot_scale);
  3571. return rc;
  3572. }
  3573. static int cpr_init_cpr_voltages(struct cpr_regulator *cpr_vreg,
  3574. struct device *dev)
  3575. {
  3576. int i;
  3577. int size = cpr_vreg->num_corners + 1;
  3578. cpr_vreg->last_volt = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
  3579. if (!cpr_vreg->last_volt)
3580. return -ENOMEM;
  3581. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
  3582. cpr_vreg->last_volt[i] = cpr_vreg->open_loop_volt[i];
  3583. return 0;
  3584. }
  3585. /*
  3586. * This function fills the virtual_limit array with voltages read from the
  3587. * prop_name device tree property if a given tuple in the property matches
3588. * the speed-bin and PVS version fuses found on the chip. Otherwise,
3589. * it fills the virtual_limit array with the corresponding values from the
3590. * fuse_limit array.
  3591. */
  3592. static int cpr_fill_override_voltage(struct cpr_regulator *cpr_vreg,
  3593. struct device *dev, const char *prop_name, const char *label,
  3594. int *virtual_limit, int *fuse_limit)
  3595. {
  3596. int rc = 0;
  3597. int i, j, size, pos;
  3598. struct property *prop;
  3599. bool match_found = false;
  3600. size_t buflen;
  3601. char *buf;
  3602. u32 *tmp;
  3603. prop = of_find_property(dev->of_node, prop_name, NULL);
  3604. if (!prop)
  3605. goto use_fuse_corner_limits;
  3606. size = prop->length / sizeof(u32);
  3607. if (size == 0 || size % (cpr_vreg->num_corners + 2)) {
  3608. cpr_err(cpr_vreg, "%s property format is invalid; reusing per-fuse-corner limits\n",
  3609. prop_name);
  3610. goto use_fuse_corner_limits;
  3611. }
  3612. tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
  3613. if (!tmp) {
  3614. cpr_err(cpr_vreg, "memory alloc failed\n");
  3615. return -ENOMEM;
  3616. }
  3617. rc = of_property_read_u32_array(dev->of_node, prop_name, tmp, size);
  3618. if (rc < 0) {
  3619. kfree(tmp);
  3620. cpr_err(cpr_vreg, "%s reading failed, rc = %d\n", prop_name,
  3621. rc);
  3622. return rc;
  3623. }
  3624. /*
  3625. * Get limit voltage for each virtual corner based upon the speed_bin
  3626. * and pvs_version values.
  3627. */
  3628. for (i = 0; i < size; i += cpr_vreg->num_corners + 2) {
  3629. if (tmp[i] != cpr_vreg->speed_bin &&
  3630. tmp[i] != FUSE_PARAM_MATCH_ANY)
  3631. continue;
  3632. if (tmp[i + 1] != cpr_vreg->pvs_version &&
  3633. tmp[i + 1] != FUSE_PARAM_MATCH_ANY)
  3634. continue;
  3635. for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++)
3636. virtual_limit[j] = tmp[i + 2 + j - CPR_CORNER_MIN];
  3637. match_found = true;
  3638. break;
  3639. }
  3640. kfree(tmp);
  3641. if (!match_found)
  3642. goto use_fuse_corner_limits;
  3643. /*
  3644. * Log per-virtual-corner voltage limits since they are useful for
  3645. * baseline CPR debugging.
  3646. */
  3647. buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
  3648. buf = kzalloc(buflen, GFP_KERNEL);
  3649. if (buf == NULL) {
  3650. cpr_err(cpr_vreg, "Could not allocate memory for corner limit voltage logging\n");
  3651. return 0;
  3652. }
  3653. for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
  3654. pos += scnprintf(buf + pos, buflen - pos, "%d%s",
  3655. virtual_limit[i], i < cpr_vreg->num_corners ? " " : "");
  3656. cpr_info(cpr_vreg, "%s override voltage: [%s] uV\n", label, buf);
  3657. kfree(buf);
  3658. return rc;
  3659. use_fuse_corner_limits:
  3660. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
  3661. virtual_limit[i] = fuse_limit[cpr_vreg->corner_map[i]];
  3662. return rc;
  3663. }
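/*
 * Hypothetical example of the override layout handled above, for 4 virtual
 * corners (voltages invented, in uV):
 *
 *	qcom,cpr-voltage-ceiling-override =
 *		<0xffffffff 0xffffffff 1050000 1050000 1150000 1280000>;
 *
 * i.e. <speed_bin pvs_version volt[1] ... volt[num_corners]>, with
 * 0xffffffff acting as a match-any wildcard for the two fuse fields.
 */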
  3664. /*
  3665. * This function loads per-virtual-corner ceiling and floor voltages from device
  3666. * tree if their respective device tree properties are present. These limits
  3667. * override those found in the per-fuse-corner arrays fuse_ceiling_volt and
  3668. * fuse_floor_volt.
  3669. */
  3670. static int cpr_init_ceiling_floor_override_voltages(
  3671. struct cpr_regulator *cpr_vreg, struct device *dev)
  3672. {
  3673. int rc, i;
  3674. int size = cpr_vreg->num_corners + 1;
  3675. cpr_vreg->ceiling_volt = devm_kzalloc(dev, sizeof(int) * size,
  3676. GFP_KERNEL);
  3677. cpr_vreg->floor_volt = devm_kzalloc(dev, sizeof(int) * size,
  3678. GFP_KERNEL);
  3679. cpr_vreg->cpr_max_ceiling = devm_kzalloc(dev, sizeof(int) * size,
  3680. GFP_KERNEL);
  3681. if (!cpr_vreg->ceiling_volt || !cpr_vreg->floor_volt ||
  3682. !cpr_vreg->cpr_max_ceiling)
  3683. return -ENOMEM;
  3684. rc = cpr_fill_override_voltage(cpr_vreg, dev,
  3685. "qcom,cpr-voltage-ceiling-override", "ceiling",
  3686. cpr_vreg->ceiling_volt, cpr_vreg->fuse_ceiling_volt);
  3687. if (rc)
  3688. return rc;
  3689. rc = cpr_fill_override_voltage(cpr_vreg, dev,
  3690. "qcom,cpr-voltage-floor-override", "floor",
  3691. cpr_vreg->floor_volt, cpr_vreg->fuse_floor_volt);
  3692. if (rc)
  3693. return rc;
  3694. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
  3695. if (cpr_vreg->floor_volt[i] > cpr_vreg->ceiling_volt[i]) {
  3696. cpr_err(cpr_vreg, "virtual corner %d floor=%d uV > ceiling=%d uV\n",
  3697. i, cpr_vreg->floor_volt[i],
  3698. cpr_vreg->ceiling_volt[i]);
  3699. return -EINVAL;
  3700. }
  3701. if (cpr_vreg->ceiling_max < cpr_vreg->ceiling_volt[i])
  3702. cpr_vreg->ceiling_max = cpr_vreg->ceiling_volt[i];
  3703. cpr_vreg->cpr_max_ceiling[i] = cpr_vreg->ceiling_volt[i];
  3704. }
  3705. return rc;
  3706. }
  3707. /*
  3708. * This function computes the per-virtual-corner floor voltages from
  3709. * per-virtual-corner ceiling voltages with an offset specified by a
3710. * device-tree property. This must be called after open-loop voltage
3711. * scaling, after the floor_volt array is loaded, and after the ceiling
3712. * voltage is conditionally reduced to the open-loop voltage. For each
3713. * corner, the maximum of the calculated floor voltage and the existing
3714. * floor_volt value is stored back into the floor_volt array.
  3715. */
  3716. static int cpr_init_floor_to_ceiling_range(
  3717. struct cpr_regulator *cpr_vreg, struct device *dev)
  3718. {
  3719. int rc, i, tuple_count, tuple_match, len, pos;
  3720. u32 index, floor_volt_adjust = 0;
  3721. char *prop_str, *buf;
  3722. size_t buflen;
  3723. prop_str = "qcom,cpr-floor-to-ceiling-max-range";
  3724. if (!of_find_property(dev->of_node, prop_str, &len))
  3725. return 0;
  3726. if (cpr_vreg->cpr_fuse_map_count) {
  3727. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
  3728. /*
  3729. * No matching index to use for floor-to-ceiling
  3730. * max range.
  3731. */
  3732. return 0;
  3733. }
  3734. tuple_count = cpr_vreg->cpr_fuse_map_count;
  3735. tuple_match = cpr_vreg->cpr_fuse_map_match;
  3736. } else {
  3737. tuple_count = 1;
  3738. tuple_match = 0;
  3739. }
  3740. if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
  3741. cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_str, len);
  3742. return -EINVAL;
  3743. }
  3744. for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
  3745. index = tuple_match * cpr_vreg->num_corners
  3746. + i - CPR_CORNER_MIN;
  3747. rc = of_property_read_u32_index(dev->of_node, prop_str,
  3748. index, &floor_volt_adjust);
  3749. if (rc) {
  3750. cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
  3751. prop_str, index, rc);
  3752. return rc;
  3753. }
  3754. if ((int)floor_volt_adjust >= 0) {
  3755. cpr_vreg->floor_volt[i] = max(cpr_vreg->floor_volt[i],
  3756. (cpr_vreg->ceiling_volt[i]
  3757. - (int)floor_volt_adjust));
  3758. cpr_vreg->floor_volt[i]
  3759. = DIV_ROUND_UP(cpr_vreg->floor_volt[i],
  3760. cpr_vreg->step_volt) *
  3761. cpr_vreg->step_volt;
  3762. if (cpr_vreg->open_loop_volt[i]
  3763. < cpr_vreg->floor_volt[i])
  3764. cpr_vreg->open_loop_volt[i]
  3765. = cpr_vreg->floor_volt[i];
  3766. }
  3767. }
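/*
 * Worked example with invented numbers: ceiling_volt[i] = 1150000 uV, a
 * floor-to-ceiling range of 100000 uV, step_volt = 12500 uV and an existing
 * floor_volt[i] of 1040000 uV give
 *
 *	max(1040000, 1150000 - 100000) = 1050000
 *	DIV_ROUND_UP(1050000, 12500) * 12500 = 1050000
 *
 * so the floor becomes 1050000 uV and the open-loop voltage is raised to it
 * if it was lower.
 */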
  3768. /*
3769. * Log the per-virtual-corner floor voltages that result after applying
3770. * the floor-to-ceiling max range, since they are useful for baseline CPR
3771. * debugging.
  3772. */
  3773. buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
  3774. buf = kzalloc(buflen, GFP_KERNEL);
  3775. if (buf == NULL) {
  3776. cpr_err(cpr_vreg, "Could not allocate memory for corner limit voltage logging\n");
  3777. return 0;
  3778. }
  3779. for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
  3780. pos += scnprintf(buf + pos, buflen - pos, "%d%s",
  3781. cpr_vreg->floor_volt[i],
  3782. i < cpr_vreg->num_corners ? " " : "");
  3783. cpr_info(cpr_vreg, "Final floor override voltages: [%s] uV\n", buf);
  3784. kfree(buf);
  3785. return 0;
  3786. }
  3787. static int cpr_init_step_quotient(struct platform_device *pdev,
  3788. struct cpr_regulator *cpr_vreg)
  3789. {
  3790. struct device_node *of_node = pdev->dev.of_node;
  3791. int len = 0;
  3792. u32 step_quot[CPR_NUM_RING_OSC];
  3793. int i, rc;
  3794. if (!of_find_property(of_node, "qcom,cpr-step-quotient", &len)) {
  3795. cpr_err(cpr_vreg, "qcom,cpr-step-quotient property missing\n");
  3796. return -EINVAL;
  3797. }
  3798. if (len == sizeof(u32)) {
  3799. /* Single step quotient used for all ring oscillators. */
  3800. rc = of_property_read_u32(of_node, "qcom,cpr-step-quotient",
  3801. step_quot);
  3802. if (rc) {
  3803. cpr_err(cpr_vreg, "could not read qcom,cpr-step-quotient, rc=%d\n",
  3804. rc);
  3805. return rc;
  3806. }
  3807. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
  3808. i++)
  3809. cpr_vreg->step_quotient[i] = step_quot[0];
  3810. } else if (len == sizeof(u32) * CPR_NUM_RING_OSC) {
  3811. /* Unique step quotient used per ring oscillator. */
  3812. rc = of_property_read_u32_array(of_node,
  3813. "qcom,cpr-step-quotient", step_quot, CPR_NUM_RING_OSC);
  3814. if (rc) {
  3815. cpr_err(cpr_vreg, "could not read qcom,cpr-step-quotient, rc=%d\n",
  3816. rc);
  3817. return rc;
  3818. }
  3819. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
  3820. i++)
  3821. cpr_vreg->step_quotient[i]
  3822. = step_quot[cpr_vreg->cpr_fuse_ro_sel[i]];
  3823. } else {
  3824. cpr_err(cpr_vreg, "qcom,cpr-step-quotient has invalid length=%d\n",
  3825. len);
  3826. return -EINVAL;
  3827. }
  3828. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
  3829. cpr_debug(cpr_vreg, "step_quotient[%d]=%u\n", i,
  3830. cpr_vreg->step_quotient[i]);
  3831. return 0;
  3832. }
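/*
 * Illustrative forms of the property parsed above (values hypothetical).
 * Either a single step quotient shared by all ring oscillators:
 *
 *	qcom,cpr-step-quotient = <26>;
 *
 * or one value per ring oscillator (CPR_NUM_RING_OSC entries, assumed to be
 * 8 here), from which the per-fuse-corner value is picked via
 * cpr_fuse_ro_sel[]:
 *
 *	qcom,cpr-step-quotient = <26 26 26 26 26 26 26 26>;
 */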
  3833. static int cpr_init_cpr_parameters(struct platform_device *pdev,
  3834. struct cpr_regulator *cpr_vreg)
  3835. {
  3836. struct device_node *of_node = pdev->dev.of_node;
  3837. int rc = 0;
  3838. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-ref-clk",
  3839. &cpr_vreg->ref_clk_khz, rc);
  3840. if (rc)
  3841. return rc;
  3842. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-delay",
  3843. &cpr_vreg->timer_delay_us, rc);
  3844. if (rc)
  3845. return rc;
  3846. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-cons-up",
  3847. &cpr_vreg->timer_cons_up, rc);
  3848. if (rc)
  3849. return rc;
  3850. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-cons-down",
  3851. &cpr_vreg->timer_cons_down, rc);
  3852. if (rc)
  3853. return rc;
  3854. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-irq-line",
  3855. &cpr_vreg->irq_line, rc);
  3856. if (rc)
  3857. return rc;
  3858. rc = cpr_init_step_quotient(pdev, cpr_vreg);
  3859. if (rc)
  3860. return rc;
  3861. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-up-threshold",
  3862. &cpr_vreg->up_threshold, rc);
  3863. if (rc)
  3864. return rc;
  3865. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-down-threshold",
  3866. &cpr_vreg->down_threshold, rc);
  3867. if (rc)
  3868. return rc;
  3869. cpr_info(cpr_vreg, "up threshold = %u, down threshold = %u\n",
  3870. cpr_vreg->up_threshold, cpr_vreg->down_threshold);
  3871. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-idle-clocks",
  3872. &cpr_vreg->idle_clocks, rc);
  3873. if (rc)
  3874. return rc;
  3875. CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-gcnt-time",
  3876. &cpr_vreg->gcnt_time_us, rc);
  3877. if (rc)
  3878. return rc;
  3879. CPR_PROP_READ_U32(cpr_vreg, of_node, "vdd-apc-step-up-limit",
  3880. &cpr_vreg->vdd_apc_step_up_limit, rc);
  3881. if (rc)
  3882. return rc;
  3883. CPR_PROP_READ_U32(cpr_vreg, of_node, "vdd-apc-step-down-limit",
  3884. &cpr_vreg->vdd_apc_step_down_limit, rc);
  3885. if (rc)
  3886. return rc;
  3887. rc = of_property_read_u32(of_node, "qcom,cpr-clamp-timer-interval",
  3888. &cpr_vreg->clamp_timer_interval);
  3889. if (rc && rc != -EINVAL) {
  3890. cpr_err(cpr_vreg,
  3891. "error reading qcom,cpr-clamp-timer-interval, rc=%d\n",
  3892. rc);
  3893. return rc;
  3894. }
  3895. cpr_vreg->clamp_timer_interval = min(cpr_vreg->clamp_timer_interval,
  3896. (u32)RBIF_TIMER_ADJ_CLAMP_INT_MASK);
  3897. /* Init module parameter with the DT value */
  3898. cpr_vreg->enable = of_property_read_bool(of_node, "qcom,cpr-enable");
  3899. cpr_info(cpr_vreg, "CPR is %s by default.\n",
  3900. cpr_vreg->enable ? "enabled" : "disabled");
  3901. return 0;
  3902. }
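/*
 * Sketch of how the timing and threshold properties consumed above might
 * appear in a device-tree node, assuming the CPR_PROP_READ_U32() macro
 * prepends the "qcom," vendor prefix. All values are hypothetical;
 * cpr-ref-clk is in kHz and cpr-timer-delay/cpr-gcnt-time are in
 * microseconds.
 *
 *	qcom,cpr-ref-clk = <19200>;
 *	qcom,cpr-timer-delay = <5000>;
 *	qcom,cpr-timer-cons-up = <0>;
 *	qcom,cpr-timer-cons-down = <2>;
 *	qcom,cpr-irq-line = <0>;
 *	qcom,cpr-up-threshold = <1>;
 *	qcom,cpr-down-threshold = <2>;
 *	qcom,cpr-idle-clocks = <15>;
 *	qcom,cpr-gcnt-time = <1>;
 *	qcom,vdd-apc-step-up-limit = <1>;
 *	qcom,vdd-apc-step-down-limit = <1>;
 *	qcom,cpr-enable;
 */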
  3903. static void cpr_regulator_switch_adj_cpus(struct cpr_regulator *cpr_vreg)
  3904. {
  3905. cpr_vreg->last_volt = cpr_vreg->adj_cpus_last_volt
  3906. [cpr_vreg->online_cpus];
  3907. cpr_vreg->save_ctl = cpr_vreg->adj_cpus_save_ctl[cpr_vreg->online_cpus];
  3908. cpr_vreg->save_irq = cpr_vreg->adj_cpus_save_irq[cpr_vreg->online_cpus];
  3909. if (cpr_vreg->adj_cpus_quot_adjust)
  3910. cpr_vreg->quot_adjust = cpr_vreg->adj_cpus_quot_adjust
  3911. [cpr_vreg->online_cpus];
  3912. if (cpr_vreg->adj_cpus_open_loop_volt)
  3913. cpr_vreg->open_loop_volt
  3914. = cpr_vreg->adj_cpus_open_loop_volt
  3915. [cpr_vreg->online_cpus];
  3916. if (cpr_vreg->adj_cpus_open_loop_volt_as_ceiling)
  3917. cpr_vreg->ceiling_volt = cpr_vreg->open_loop_volt;
  3918. }
  3919. static void cpr_regulator_set_online_cpus(struct cpr_regulator *cpr_vreg)
  3920. {
  3921. int i, j;
  3922. cpr_vreg->online_cpus = 0;
  3923. get_online_cpus();
  3924. for_each_online_cpu(i)
  3925. for (j = 0; j < cpr_vreg->num_adj_cpus; j++)
  3926. if (i == cpr_vreg->adj_cpus[j])
  3927. cpr_vreg->online_cpus++;
  3928. put_online_cpus();
  3929. }
  3930. static int cpr_regulator_cpu_callback(struct notifier_block *nb,
  3931. unsigned long action, void *data)
  3932. {
  3933. struct cpr_regulator *cpr_vreg = container_of(nb, struct cpr_regulator,
  3934. cpu_notifier);
  3935. int cpu = (long)data;
  3936. int prev_online_cpus, rc, i;
  3937. action &= ~CPU_TASKS_FROZEN;
  3938. if (action != CPU_UP_PREPARE && action != CPU_UP_CANCELED
  3939. && action != CPU_DEAD)
  3940. return NOTIFY_OK;
  3941. mutex_lock(&cpr_vreg->cpr_mutex);
  3942. if (cpr_vreg->skip_voltage_change_during_suspend
  3943. && cpr_vreg->is_cpr_suspended) {
  3944. /* Do nothing during system suspend/resume */
  3945. goto done;
  3946. }
  3947. prev_online_cpus = cpr_vreg->online_cpus;
  3948. cpr_regulator_set_online_cpus(cpr_vreg);
  3949. if (action == CPU_UP_PREPARE)
  3950. for (i = 0; i < cpr_vreg->num_adj_cpus; i++)
  3951. if (cpu == cpr_vreg->adj_cpus[i]) {
  3952. cpr_vreg->online_cpus++;
  3953. break;
  3954. }
  3955. if (cpr_vreg->online_cpus == prev_online_cpus)
  3956. goto done;
  3957. cpr_debug(cpr_vreg, "adjusting corner %d quotient for %d cpus\n",
  3958. cpr_vreg->corner, cpr_vreg->online_cpus);
  3959. cpr_regulator_switch_adj_cpus(cpr_vreg);
  3960. if (cpr_vreg->corner) {
  3961. rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
  3962. cpr_vreg->corner, true);
  3963. if (rc)
  3964. cpr_err(cpr_vreg, "could not update quotient, rc=%d\n",
  3965. rc);
  3966. }
  3967. done:
  3968. mutex_unlock(&cpr_vreg->cpr_mutex);
  3969. return NOTIFY_OK;
  3970. }
  3971. static void cpr_pm_disable(struct cpr_regulator *cpr_vreg, bool disable)
  3972. {
  3973. u32 reg_val;
  3974. if (cpr_vreg->is_cpr_suspended)
  3975. return;
  3976. reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
  3977. if (disable) {
  3978. /* Proceed only if CPR is enabled */
  3979. if (!(reg_val & RBCPR_CTL_LOOP_EN))
  3980. return;
  3981. cpr_ctl_disable(cpr_vreg);
  3982. cpr_vreg->cpr_disabled_in_pc = true;
  3983. } else {
  3984. /* Proceed only if CPR was disabled in PM_ENTER */
  3985. if (!cpr_vreg->cpr_disabled_in_pc)
  3986. return;
  3987. cpr_vreg->cpr_disabled_in_pc = false;
  3988. cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
  3989. }
  3990. /* Make sure register write is complete */
  3991. mb();
  3992. }
  3993. static int cpr_pm_callback(struct notifier_block *nb,
  3994. unsigned long action, void *data)
  3995. {
  3996. struct cpr_regulator *cpr_vreg = container_of(nb,
  3997. struct cpr_regulator, pm_notifier);
  3998. if (action != CPU_PM_ENTER && action != CPU_PM_ENTER_FAILED &&
  3999. action != CPU_PM_EXIT)
  4000. return NOTIFY_OK;
  4001. switch (action) {
  4002. case CPU_PM_ENTER:
  4003. cpr_pm_disable(cpr_vreg, true);
  4004. break;
  4005. case CPU_PM_ENTER_FAILED:
  4006. case CPU_PM_EXIT:
  4007. cpr_pm_disable(cpr_vreg, false);
  4008. break;
  4009. }
  4010. return NOTIFY_OK;
  4011. }
  4012. static int cpr_parse_adj_cpus_init_voltage(struct cpr_regulator *cpr_vreg,
  4013. struct device *dev)
  4014. {
  4015. int rc, i, j, k, tuple_count, tuple_match, len, offset;
  4016. int *temp;
  4017. if (!of_find_property(dev->of_node,
  4018. "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
  4019. NULL))
  4020. return 0;
  4021. if (cpr_vreg->cpr_fuse_map_count) {
  4022. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
  4023. /* No matching index to use for voltage adjustment. */
  4024. return 0;
  4025. }
  4026. tuple_count = cpr_vreg->cpr_fuse_map_count;
  4027. tuple_match = cpr_vreg->cpr_fuse_map_match;
  4028. } else {
  4029. tuple_count = 1;
  4030. tuple_match = 0;
  4031. }
  4032. len = (cpr_vreg->num_adj_cpus + 1) * tuple_count
  4033. * cpr_vreg->num_corners;
  4034. temp = kzalloc(sizeof(int) * len, GFP_KERNEL);
  4035. if (!temp) {
  4036. cpr_err(cpr_vreg, "Could not allocate memory\n");
  4037. return -ENOMEM;
  4038. }
  4039. cpr_vreg->adj_cpus_open_loop_volt = devm_kzalloc(dev,
  4040. sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
  4041. GFP_KERNEL);
  4042. if (!cpr_vreg->adj_cpus_open_loop_volt) {
  4043. cpr_err(cpr_vreg, "Could not allocate memory\n");
  4044. rc = -ENOMEM;
  4045. goto done;
  4046. }
  4047. cpr_vreg->adj_cpus_open_loop_volt[0] = devm_kzalloc(dev,
  4048. sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
  4049. * (cpr_vreg->num_corners + 1),
  4050. GFP_KERNEL);
  4051. if (!cpr_vreg->adj_cpus_open_loop_volt[0]) {
  4052. cpr_err(cpr_vreg, "Could not allocate memory\n");
  4053. rc = -ENOMEM;
  4054. goto done;
  4055. }
  4056. for (i = 1; i <= cpr_vreg->num_adj_cpus; i++)
  4057. cpr_vreg->adj_cpus_open_loop_volt[i] =
  4058. cpr_vreg->adj_cpus_open_loop_volt[0] +
  4059. i * (cpr_vreg->num_corners + 1);
  4060. rc = of_property_read_u32_array(dev->of_node,
  4061. "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
  4062. temp, len);
  4063. if (rc) {
  4064. cpr_err(cpr_vreg, "failed to read qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment, rc=%d\n",
  4065. rc);
  4066. goto done;
  4067. }
  4068. cpr_debug(cpr_vreg, "Open loop voltage based on number of online CPUs:\n");
  4069. offset = tuple_match * cpr_vreg->num_corners *
  4070. (cpr_vreg->num_adj_cpus + 1);
  4071. for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
  4072. for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
  4073. k = j - 1 + offset;
  4074. cpr_vreg->adj_cpus_open_loop_volt[i][j]
  4075. = cpr_vreg->open_loop_volt[j] + temp[k];
  4076. cpr_vreg->adj_cpus_open_loop_volt[i][j]
  4077. = DIV_ROUND_UP(cpr_vreg->
  4078. adj_cpus_open_loop_volt[i][j],
  4079. cpr_vreg->step_volt) * cpr_vreg->step_volt;
  4080. if (cpr_vreg->adj_cpus_open_loop_volt[i][j]
  4081. > cpr_vreg->ceiling_volt[j])
  4082. cpr_vreg->adj_cpus_open_loop_volt[i][j]
  4083. = cpr_vreg->ceiling_volt[j];
  4084. if (cpr_vreg->adj_cpus_open_loop_volt[i][j]
  4085. < cpr_vreg->floor_volt[j])
  4086. cpr_vreg->adj_cpus_open_loop_volt[i][j]
  4087. = cpr_vreg->floor_volt[j];
  4088. cpr_debug(cpr_vreg, "cpus=%d, corner=%d, volt=%d\n",
  4089. i, j, cpr_vreg->adj_cpus_open_loop_volt[i][j]);
  4090. }
  4091. offset += cpr_vreg->num_corners;
  4092. }
  4093. cpr_vreg->adj_cpus_open_loop_volt_as_ceiling
  4094. = of_property_read_bool(dev->of_node,
  4095. "qcom,cpr-online-cpu-init-voltage-as-ceiling");
  4096. done:
  4097. kfree(temp);
  4098. return rc;
  4099. }
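/*
 * Illustrative layout of the flat array read above (hypothetical numbers):
 * with num_adj_cpus = 2 and num_corners = 3, each fuse-map tuple contributes
 * (2 + 1) * 3 = 9 values ordered as
 *
 *	<a0_1 a0_2 a0_3  a1_1 a1_2 a1_3  a2_1 a2_2 a2_3>
 *
 * where aN_M is the open-loop adjustment in uV applied at virtual corner M
 * when N of the adjusted CPUs are online. Each adjusted voltage is rounded
 * up to a step_volt multiple and clamped between the corner's floor and
 * ceiling, as done in the loop above.
 */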
  4100. static int cpr_parse_adj_cpus_target_quot(struct cpr_regulator *cpr_vreg,
  4101. struct device *dev)
  4102. {
  4103. int rc, i, j, k, tuple_count, tuple_match, len, offset;
  4104. int *temp;
  4105. if (!of_find_property(dev->of_node,
  4106. "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
  4107. NULL))
  4108. return 0;
  4109. if (cpr_vreg->cpr_fuse_map_count) {
  4110. if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
  4111. /* No matching index to use for quotient adjustment. */
  4112. return 0;
  4113. }
  4114. tuple_count = cpr_vreg->cpr_fuse_map_count;
  4115. tuple_match = cpr_vreg->cpr_fuse_map_match;
  4116. } else {
  4117. tuple_count = 1;
  4118. tuple_match = 0;
  4119. }
  4120. len = (cpr_vreg->num_adj_cpus + 1) * tuple_count
  4121. * cpr_vreg->num_corners;
  4122. temp = kzalloc(sizeof(int) * len, GFP_KERNEL);
  4123. if (!temp) {
  4124. cpr_err(cpr_vreg, "Could not allocate memory\n");
  4125. return -ENOMEM;
  4126. }
  4127. cpr_vreg->adj_cpus_quot_adjust = devm_kzalloc(dev,
  4128. sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
  4129. GFP_KERNEL);
  4130. if (!cpr_vreg->adj_cpus_quot_adjust) {
  4131. cpr_err(cpr_vreg, "Could not allocate memory\n");
  4132. rc = -ENOMEM;
  4133. goto done;
  4134. }
  4135. cpr_vreg->adj_cpus_quot_adjust[0] = devm_kzalloc(dev,
  4136. sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
  4137. * (cpr_vreg->num_corners + 1),
  4138. GFP_KERNEL);
  4139. if (!cpr_vreg->adj_cpus_quot_adjust[0]) {
  4140. cpr_err(cpr_vreg, "Could not allocate memory\n");
  4141. rc = -ENOMEM;
  4142. goto done;
  4143. }
  4144. for (i = 1; i <= cpr_vreg->num_adj_cpus; i++)
  4145. cpr_vreg->adj_cpus_quot_adjust[i] =
  4146. cpr_vreg->adj_cpus_quot_adjust[0] +
  4147. i * (cpr_vreg->num_corners + 1);
  4148. rc = of_property_read_u32_array(dev->of_node,
  4149. "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
  4150. temp, len);
  4151. if (rc) {
  4152. cpr_err(cpr_vreg, "failed to read qcom,cpr-online-cpu-virtual-corner-quotient-adjustment, rc=%d\n",
  4153. rc);
  4154. goto done;
  4155. }
  4156. cpr_debug(cpr_vreg, "Target quotients based on number of online CPUs:\n");
  4157. offset = tuple_match * cpr_vreg->num_corners *
  4158. (cpr_vreg->num_adj_cpus + 1);
  4159. for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
  4160. for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
  4161. k = j - 1 + offset;
  4162. cpr_vreg->adj_cpus_quot_adjust[i][j] =
  4163. cpr_vreg->quot_adjust[j] - temp[k];
  4164. cpr_debug(cpr_vreg, "cpus=%d, corner=%d, quot=%d\n",
  4165. i, j,
  4166. cpr_vreg->cpr_fuse_target_quot[
  4167. cpr_vreg->corner_map[j]]
  4168. - cpr_vreg->adj_cpus_quot_adjust[i][j]);
  4169. }
  4170. offset += cpr_vreg->num_corners;
  4171. }
  4172. done:
  4173. kfree(temp);
  4174. return rc;
  4175. }
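/*
 * Worked example of the bookkeeping above (hypothetical numbers): if
 * quot_adjust[j] = 100 and the property supplies temp[k] = 40 for a given
 * online-CPU count, then adj_cpus_quot_adjust[i][j] = 100 - 40 = 60 and the
 * effective target quotient becomes
 * cpr_fuse_target_quot[corner_map[j]] - 60, i.e. 40 counts higher than with
 * the default per-corner adjustment alone.
 */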
  4176. static int cpr_init_per_cpu_adjustments(struct cpr_regulator *cpr_vreg,
  4177. struct device *dev)
  4178. {
  4179. int rc, i, j;
  4180. if (!of_find_property(dev->of_node,
  4181. "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
  4182. NULL)
  4183. && !of_find_property(dev->of_node,
  4184. "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
  4185. NULL)) {
  4186. /* No per-online CPU adjustment needed */
  4187. return 0;
  4188. }
  4189. if (!cpr_vreg->num_adj_cpus) {
  4190. cpr_err(cpr_vreg, "qcom,cpr-cpus property missing\n");
  4191. return -EINVAL;
  4192. }
  4193. rc = cpr_parse_adj_cpus_init_voltage(cpr_vreg, dev);
  4194. if (rc) {
4195. cpr_err(cpr_vreg, "cpr_parse_adj_cpus_init_voltage failed: rc=%d\n",
  4196. rc);
  4197. return rc;
  4198. }
  4199. rc = cpr_parse_adj_cpus_target_quot(cpr_vreg, dev);
  4200. if (rc) {
4201. cpr_err(cpr_vreg, "cpr_parse_adj_cpus_target_quot failed: rc=%d\n",
  4202. rc);
  4203. return rc;
  4204. }
  4205. cpr_vreg->adj_cpus_last_volt = devm_kzalloc(dev,
  4206. sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
  4207. GFP_KERNEL);
  4208. cpr_vreg->adj_cpus_save_ctl = devm_kzalloc(dev,
  4209. sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
  4210. GFP_KERNEL);
  4211. cpr_vreg->adj_cpus_save_irq = devm_kzalloc(dev,
  4212. sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
  4213. GFP_KERNEL);
  4214. if (!cpr_vreg->adj_cpus_last_volt || !cpr_vreg->adj_cpus_save_ctl ||
  4215. !cpr_vreg->adj_cpus_save_irq) {
  4216. cpr_err(cpr_vreg, "Could not allocate memory\n");
  4217. return -ENOMEM;
  4218. }
  4219. cpr_vreg->adj_cpus_last_volt[0] = devm_kzalloc(dev,
  4220. sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
  4221. * (cpr_vreg->num_corners + 1),
  4222. GFP_KERNEL);
  4223. cpr_vreg->adj_cpus_save_ctl[0] = devm_kzalloc(dev,
  4224. sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
  4225. * (cpr_vreg->num_corners + 1),
  4226. GFP_KERNEL);
  4227. cpr_vreg->adj_cpus_save_irq[0] = devm_kzalloc(dev,
  4228. sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
  4229. * (cpr_vreg->num_corners + 1),
  4230. GFP_KERNEL);
  4231. if (!cpr_vreg->adj_cpus_last_volt[0] ||
  4232. !cpr_vreg->adj_cpus_save_ctl[0] ||
  4233. !cpr_vreg->adj_cpus_save_irq[0]) {
  4234. cpr_err(cpr_vreg, "Could not allocate memory\n");
  4235. return -ENOMEM;
  4236. }
  4237. for (i = 1; i <= cpr_vreg->num_adj_cpus; i++) {
  4238. j = i * (cpr_vreg->num_corners + 1);
  4239. cpr_vreg->adj_cpus_last_volt[i] =
  4240. cpr_vreg->adj_cpus_last_volt[0] + j;
  4241. cpr_vreg->adj_cpus_save_ctl[i] =
  4242. cpr_vreg->adj_cpus_save_ctl[0] + j;
  4243. cpr_vreg->adj_cpus_save_irq[i] =
  4244. cpr_vreg->adj_cpus_save_irq[0] + j;
  4245. }
  4246. for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
  4247. for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
  4248. cpr_vreg->adj_cpus_save_ctl[i][j] =
  4249. cpr_vreg->save_ctl[j];
  4250. cpr_vreg->adj_cpus_save_irq[i][j] =
  4251. cpr_vreg->save_irq[j];
  4252. cpr_vreg->adj_cpus_last_volt[i][j]
  4253. = cpr_vreg->adj_cpus_open_loop_volt
  4254. ? cpr_vreg->adj_cpus_open_loop_volt[i][j]
  4255. : cpr_vreg->open_loop_volt[j];
  4256. }
  4257. }
  4258. cpr_regulator_set_online_cpus(cpr_vreg);
  4259. cpr_debug(cpr_vreg, "%d cpus online\n", cpr_vreg->online_cpus);
  4260. devm_kfree(dev, cpr_vreg->last_volt);
  4261. devm_kfree(dev, cpr_vreg->save_ctl);
  4262. devm_kfree(dev, cpr_vreg->save_irq);
  4263. if (cpr_vreg->adj_cpus_quot_adjust)
  4264. devm_kfree(dev, cpr_vreg->quot_adjust);
  4265. if (cpr_vreg->adj_cpus_open_loop_volt)
  4266. devm_kfree(dev, cpr_vreg->open_loop_volt);
  4267. if (cpr_vreg->adj_cpus_open_loop_volt_as_ceiling)
  4268. devm_kfree(dev, cpr_vreg->ceiling_volt);
  4269. cpr_regulator_switch_adj_cpus(cpr_vreg);
  4270. cpr_vreg->skip_voltage_change_during_suspend
  4271. = of_property_read_bool(dev->of_node,
  4272. "qcom,cpr-skip-voltage-change-during-suspend");
  4273. cpr_vreg->cpu_notifier.notifier_call = cpr_regulator_cpu_callback;
  4274. register_hotcpu_notifier(&cpr_vreg->cpu_notifier);
  4275. return rc;
  4276. }
  4277. static int cpr_init_pm_notification(struct cpr_regulator *cpr_vreg)
  4278. {
  4279. int rc;
  4280. /* enabled only for single-core designs */
  4281. if (cpr_vreg->num_adj_cpus != 1) {
  4282. pr_warn("qcom,cpr-cpus not defined or invalid %d\n",
  4283. cpr_vreg->num_adj_cpus);
  4284. return 0;
  4285. }
  4286. cpr_vreg->pm_notifier.notifier_call = cpr_pm_callback;
  4287. rc = cpu_pm_register_notifier(&cpr_vreg->pm_notifier);
  4288. if (rc)
  4289. cpr_err(cpr_vreg, "Unable to register pm notifier rc=%d\n", rc);
  4290. return rc;
  4291. }
  4292. static int cpr_rpm_apc_init(struct platform_device *pdev,
  4293. struct cpr_regulator *cpr_vreg)
  4294. {
  4295. int rc, len = 0;
  4296. struct device_node *of_node = pdev->dev.of_node;
  4297. if (!of_find_property(of_node, "rpm-apc-supply", NULL))
  4298. return 0;
  4299. cpr_vreg->rpm_apc_vreg = devm_regulator_get(&pdev->dev, "rpm-apc");
  4300. if (IS_ERR_OR_NULL(cpr_vreg->rpm_apc_vreg)) {
  4301. rc = PTR_RET(cpr_vreg->rpm_apc_vreg);
  4302. if (rc != -EPROBE_DEFER)
  4303. cpr_err(cpr_vreg, "devm_regulator_get: rpm-apc: rc=%d\n",
  4304. rc);
  4305. return rc;
  4306. }
  4307. if (!of_find_property(of_node, "qcom,rpm-apc-corner-map", &len)) {
  4308. cpr_err(cpr_vreg,
  4309. "qcom,rpm-apc-corner-map missing:\n");
  4310. return -EINVAL;
  4311. }
  4312. if (len != cpr_vreg->num_corners * sizeof(u32)) {
  4313. cpr_err(cpr_vreg,
  4314. "qcom,rpm-apc-corner-map length=%d is invalid: required:%d\n",
  4315. len, cpr_vreg->num_corners);
  4316. return -EINVAL;
  4317. }
  4318. cpr_vreg->rpm_apc_corner_map = devm_kzalloc(&pdev->dev,
  4319. (cpr_vreg->num_corners + 1) *
  4320. sizeof(*cpr_vreg->rpm_apc_corner_map), GFP_KERNEL);
  4321. if (!cpr_vreg->rpm_apc_corner_map) {
  4322. cpr_err(cpr_vreg, "Can't allocate memory for cpr_vreg->rpm_apc_corner_map\n");
  4323. return -ENOMEM;
  4324. }
  4325. rc = of_property_read_u32_array(of_node, "qcom,rpm-apc-corner-map",
  4326. &cpr_vreg->rpm_apc_corner_map[1], cpr_vreg->num_corners);
  4327. if (rc)
  4328. cpr_err(cpr_vreg, "read qcom,rpm-apc-corner-map failed, rc = %d\n",
  4329. rc);
  4330. return rc;
  4331. }
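/*
 * Illustrative mapping for the property consumed above (hypothetical
 * values): with four virtual corners, one RPM-managed corner level is
 * listed per CPR virtual corner and stored starting at element 1 of
 * rpm_apc_corner_map[]:
 *
 *	qcom,rpm-apc-corner-map = <1 2 2 3>;
 */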
  4332. static int cpr_vsens_init(struct platform_device *pdev,
  4333. struct cpr_regulator *cpr_vreg)
  4334. {
  4335. int rc = 0, len = 0;
  4336. struct device_node *of_node = pdev->dev.of_node;
  4337. if (of_find_property(of_node, "vdd-vsens-voltage-supply", NULL)) {
  4338. cpr_vreg->vdd_vsens_voltage = devm_regulator_get(&pdev->dev,
  4339. "vdd-vsens-voltage");
  4340. if (IS_ERR_OR_NULL(cpr_vreg->vdd_vsens_voltage)) {
  4341. rc = PTR_ERR(cpr_vreg->vdd_vsens_voltage);
  4342. cpr_vreg->vdd_vsens_voltage = NULL;
  4343. if (rc == -EPROBE_DEFER)
  4344. return rc;
  4345. /* device not found */
  4346. cpr_debug(cpr_vreg, "regulator_get: vdd-vsens-voltage: rc=%d\n",
  4347. rc);
  4348. return 0;
  4349. }
  4350. }
  4351. if (of_find_property(of_node, "vdd-vsens-corner-supply", NULL)) {
  4352. cpr_vreg->vdd_vsens_corner = devm_regulator_get(&pdev->dev,
  4353. "vdd-vsens-corner");
  4354. if (IS_ERR_OR_NULL(cpr_vreg->vdd_vsens_corner)) {
  4355. rc = PTR_ERR(cpr_vreg->vdd_vsens_corner);
  4356. cpr_vreg->vdd_vsens_corner = NULL;
  4357. if (rc == -EPROBE_DEFER)
  4358. return rc;
  4359. /* device not found */
  4360. cpr_debug(cpr_vreg, "regulator_get: vdd-vsens-corner: rc=%d\n",
  4361. rc);
  4362. return 0;
  4363. }
  4364. if (!of_find_property(of_node, "qcom,vsens-corner-map", &len)) {
  4365. cpr_err(cpr_vreg, "qcom,vsens-corner-map missing\n");
  4366. return -EINVAL;
  4367. }
  4368. if (len != cpr_vreg->num_fuse_corners * sizeof(u32)) {
  4369. cpr_err(cpr_vreg, "qcom,vsens-corner-map length=%d is invalid: required:%d\n",
  4370. len, cpr_vreg->num_fuse_corners);
  4371. return -EINVAL;
  4372. }
  4373. cpr_vreg->vsens_corner_map = devm_kcalloc(&pdev->dev,
  4374. (cpr_vreg->num_fuse_corners + 1),
  4375. sizeof(*cpr_vreg->vsens_corner_map), GFP_KERNEL);
  4376. if (!cpr_vreg->vsens_corner_map)
  4377. return -ENOMEM;
  4378. rc = of_property_read_u32_array(of_node,
  4379. "qcom,vsens-corner-map",
  4380. &cpr_vreg->vsens_corner_map[1],
  4381. cpr_vreg->num_fuse_corners);
  4382. if (rc)
  4383. cpr_err(cpr_vreg, "read qcom,vsens-corner-map failed, rc = %d\n",
  4384. rc);
  4385. }
  4386. return rc;
  4387. }
  4388. static int cpr_init_cpr(struct platform_device *pdev,
  4389. struct cpr_regulator *cpr_vreg)
  4390. {
  4391. struct resource *res;
  4392. int rc = 0;
  4393. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr_clk");
  4394. if (res && res->start)
  4395. cpr_vreg->rbcpr_clk_addr = res->start;
4396. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr");
4397. if (!res || !res->start) {
4398. cpr_err(cpr_vreg, "missing rbcpr address: res=%p\n", res);
4399. return -EINVAL;
4400. }
4401. cpr_vreg->rbcpr_base = devm_ioremap(&pdev->dev, res->start,
4402. resource_size(res));
if (!cpr_vreg->rbcpr_base) {
cpr_err(cpr_vreg, "Unable to map rbcpr address %pa\n", &res->start);
return -EINVAL;
}
  4403. /* Init CPR configuration parameters */
  4404. rc = cpr_init_cpr_parameters(pdev, cpr_vreg);
  4405. if (rc)
  4406. return rc;
  4407. rc = cpr_init_cpr_efuse(pdev, cpr_vreg);
  4408. if (rc)
  4409. return rc;
  4410. /* Load per corner ceiling and floor voltages if they exist. */
  4411. rc = cpr_init_ceiling_floor_override_voltages(cpr_vreg, &pdev->dev);
  4412. if (rc)
  4413. return rc;
  4414. /*
  4415. * Limit open loop voltages based upon per corner ceiling and floor
  4416. * voltages.
  4417. */
  4418. rc = cpr_limit_open_loop_voltage(cpr_vreg);
  4419. if (rc)
  4420. return rc;
  4421. /*
  4422. * Fill the OPP table for this device with virtual voltage corner to
  4423. * open-loop voltage pairs.
  4424. */
  4425. rc = cpr_populate_opp_table(cpr_vreg, &pdev->dev);
  4426. if (rc)
  4427. return rc;
  4428. /* Reduce the ceiling voltage if allowed. */
  4429. rc = cpr_reduce_ceiling_voltage(cpr_vreg, &pdev->dev);
  4430. if (rc)
  4431. return rc;
4432. /* Load the CPR floor-to-ceiling range if present. */
  4433. rc = cpr_init_floor_to_ceiling_range(cpr_vreg, &pdev->dev);
  4434. if (rc)
  4435. return rc;
  4436. /* Init all voltage set points of APC regulator for CPR */
  4437. rc = cpr_init_cpr_voltages(cpr_vreg, &pdev->dev);
  4438. if (rc)
  4439. return rc;
  4440. /* Get and Init interrupt */
  4441. cpr_vreg->cpr_irq = platform_get_irq(pdev, 0);
4442. if (cpr_vreg->cpr_irq <= 0) {
  4443. cpr_err(cpr_vreg, "missing CPR IRQ\n");
  4444. return -EINVAL;
  4445. }
  4446. /* Configure CPR HW but keep it disabled */
  4447. rc = cpr_config(cpr_vreg, &pdev->dev);
  4448. if (rc)
  4449. return rc;
  4450. rc = request_threaded_irq(cpr_vreg->cpr_irq, NULL, cpr_irq_handler,
  4451. IRQF_ONESHOT | IRQF_TRIGGER_RISING, "cpr",
  4452. cpr_vreg);
  4453. if (rc) {
  4454. cpr_err(cpr_vreg, "CPR: request irq failed for IRQ %d\n",
  4455. cpr_vreg->cpr_irq);
  4456. return rc;
  4457. }
  4458. return 0;
  4459. }
  4460. /*
  4461. * Create a set of virtual fuse rows if optional device tree properties are
  4462. * present.
  4463. */
  4464. static int cpr_remap_efuse_data(struct platform_device *pdev,
  4465. struct cpr_regulator *cpr_vreg)
  4466. {
  4467. struct device_node *of_node = pdev->dev.of_node;
  4468. struct property *prop;
  4469. u64 fuse_param;
  4470. u32 *temp;
  4471. int size, rc, i, bits, in_row, in_bit, out_row, out_bit;
  4472. prop = of_find_property(of_node, "qcom,fuse-remap-source", NULL);
  4473. if (!prop) {
  4474. /* No fuse remapping needed. */
  4475. return 0;
  4476. }
  4477. size = prop->length / sizeof(u32);
  4478. if (size == 0 || size % 4) {
  4479. cpr_err(cpr_vreg, "qcom,fuse-remap-source has invalid size=%d\n",
  4480. size);
  4481. return -EINVAL;
  4482. }
  4483. size /= 4;
  4484. rc = of_property_read_u32(of_node, "qcom,fuse-remap-base-row",
  4485. &cpr_vreg->remapped_row_base);
  4486. if (rc) {
  4487. cpr_err(cpr_vreg, "could not read qcom,fuse-remap-base-row, rc=%d\n",
  4488. rc);
  4489. return rc;
  4490. }
  4491. temp = kzalloc(sizeof(*temp) * size * 4, GFP_KERNEL);
  4492. if (!temp) {
  4493. cpr_err(cpr_vreg, "temp memory allocation failed\n");
  4494. return -ENOMEM;
  4495. }
  4496. rc = of_property_read_u32_array(of_node, "qcom,fuse-remap-source", temp,
  4497. size * 4);
  4498. if (rc) {
  4499. cpr_err(cpr_vreg, "could not read qcom,fuse-remap-source, rc=%d\n",
  4500. rc);
  4501. goto done;
  4502. }
  4503. /*
  4504. * Format of tuples in qcom,fuse-remap-source property:
  4505. * <row bit-offset bit-count fuse-read-method>
  4506. */
  4507. for (i = 0, bits = 0; i < size; i++)
  4508. bits += temp[i * 4 + 2];
  4509. cpr_vreg->num_remapped_rows = DIV_ROUND_UP(bits, 64);
  4510. cpr_vreg->remapped_row = devm_kzalloc(&pdev->dev,
  4511. sizeof(*cpr_vreg->remapped_row) * cpr_vreg->num_remapped_rows,
  4512. GFP_KERNEL);
  4513. if (!cpr_vreg->remapped_row) {
  4514. cpr_err(cpr_vreg, "remapped_row memory allocation failed\n");
  4515. rc = -ENOMEM;
  4516. goto done;
  4517. }
  4518. for (i = 0, out_row = 0, out_bit = 0; i < size; i++) {
  4519. in_row = temp[i * 4];
  4520. in_bit = temp[i * 4 + 1];
  4521. bits = temp[i * 4 + 2];
  4522. while (bits > 64) {
  4523. fuse_param = cpr_read_efuse_param(cpr_vreg, in_row,
  4524. in_bit, 64, temp[i * 4 + 3]);
  4525. cpr_vreg->remapped_row[out_row++]
  4526. |= fuse_param << out_bit;
  4527. if (out_bit > 0)
  4528. cpr_vreg->remapped_row[out_row]
  4529. |= fuse_param >> (64 - out_bit);
  4530. bits -= 64;
  4531. in_bit += 64;
  4532. }
  4533. fuse_param = cpr_read_efuse_param(cpr_vreg, in_row, in_bit,
  4534. bits, temp[i * 4 + 3]);
  4535. cpr_vreg->remapped_row[out_row] |= fuse_param << out_bit;
  4536. if (bits < 64 - out_bit) {
  4537. out_bit += bits;
  4538. } else {
  4539. out_row++;
  4540. if (out_bit > 0)
  4541. cpr_vreg->remapped_row[out_row]
  4542. |= fuse_param >> (64 - out_bit);
  4543. out_bit = bits - (64 - out_bit);
  4544. }
  4545. }
  4546. done:
  4547. kfree(temp);
  4548. return rc;
  4549. }
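/*
 * Example of the remapping performed above (hypothetical tuples). Each
 * qcom,fuse-remap-source entry is <row bit-offset bit-count read-method>;
 * the selected bit ranges are concatenated LSB-first into 64-bit virtual
 * rows starting at qcom,fuse-remap-base-row:
 *
 *	qcom,fuse-remap-base-row = <1000>;
 *	qcom,fuse-remap-source = <13 57 2 0>,
 *				 <14  0 3 0>;
 *
 * With these tuples, virtual row 1000 holds bits 57..58 of row 13 in its
 * bits 0..1 and bits 0..2 of row 14 in its bits 2..4.
 */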
  4550. static int cpr_efuse_init(struct platform_device *pdev,
  4551. struct cpr_regulator *cpr_vreg)
  4552. {
  4553. struct resource *res;
  4554. int len;
  4555. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
  4556. if (!res || !res->start) {
  4557. cpr_err(cpr_vreg, "efuse_addr missing: res=%p\n", res);
  4558. return -EINVAL;
  4559. }
  4560. cpr_vreg->efuse_addr = res->start;
4561. len = resource_size(res);
  4562. cpr_info(cpr_vreg, "efuse_addr = %pa (len=0x%x)\n", &res->start, len);
  4563. cpr_vreg->efuse_base = ioremap(cpr_vreg->efuse_addr, len);
  4564. if (!cpr_vreg->efuse_base) {
  4565. cpr_err(cpr_vreg, "Unable to map efuse_addr %pa\n",
  4566. &cpr_vreg->efuse_addr);
  4567. return -EINVAL;
  4568. }
  4569. return 0;
  4570. }
  4571. static void cpr_efuse_free(struct cpr_regulator *cpr_vreg)
  4572. {
  4573. iounmap(cpr_vreg->efuse_base);
  4574. }
  4575. static void cpr_parse_cond_min_volt_fuse(struct cpr_regulator *cpr_vreg,
  4576. struct device_node *of_node)
  4577. {
  4578. int rc;
  4579. u32 fuse_sel[5];
  4580. /*
  4581. * Restrict all pvs corner voltages to a minimum value of
  4582. * qcom,cpr-cond-min-voltage if the fuse defined in
  4583. * qcom,cpr-fuse-cond-min-volt-sel does not read back with
  4584. * the expected value.
  4585. */
  4586. rc = of_property_read_u32_array(of_node,
  4587. "qcom,cpr-fuse-cond-min-volt-sel", fuse_sel, 5);
  4588. if (!rc) {
  4589. if (!cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel))
  4590. cpr_vreg->flags |= FLAGS_SET_MIN_VOLTAGE;
  4591. }
  4592. }
  4593. static void cpr_parse_speed_bin_fuse(struct cpr_regulator *cpr_vreg,
  4594. struct device_node *of_node)
  4595. {
  4596. int rc;
  4597. u64 fuse_bits;
  4598. u32 fuse_sel[4];
  4599. u32 speed_bits;
  4600. rc = of_property_read_u32_array(of_node,
  4601. "qcom,speed-bin-fuse-sel", fuse_sel, 4);
  4602. if (!rc) {
  4603. fuse_bits = cpr_read_efuse_row(cpr_vreg,
  4604. fuse_sel[0], fuse_sel[3]);
  4605. speed_bits = (fuse_bits >> fuse_sel[1]) &
  4606. ((1 << fuse_sel[2]) - 1);
  4607. cpr_info(cpr_vreg, "[row: %d]: 0x%llx, speed_bits = %d\n",
  4608. fuse_sel[0], fuse_bits, speed_bits);
  4609. cpr_vreg->speed_bin = speed_bits;
  4610. } else {
  4611. cpr_vreg->speed_bin = SPEED_BIN_NONE;
  4612. }
  4613. }
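/*
 * Illustrative speed-bin selector (hypothetical values): the fuse_sel tuple
 * is <row bit-offset bit-count read-method>, so
 *
 *	qcom,speed-bin-fuse-sel = <38 29 3 0>;
 *
 * reads fuse row 38 and extracts a 3-bit speed bin starting at bit 29, as
 * done by the shift and mask above.
 */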
  4614. static int cpr_voltage_uplift_enable_check(struct cpr_regulator *cpr_vreg,
  4615. struct device_node *of_node)
  4616. {
  4617. int rc;
  4618. u32 fuse_sel[5];
  4619. u32 uplift_speed_bin;
  4620. rc = of_property_read_u32_array(of_node,
  4621. "qcom,cpr-fuse-uplift-sel", fuse_sel, 5);
  4622. if (!rc) {
  4623. rc = of_property_read_u32(of_node,
  4624. "qcom,cpr-uplift-speed-bin",
  4625. &uplift_speed_bin);
  4626. if (rc < 0) {
  4627. cpr_err(cpr_vreg,
  4628. "qcom,cpr-uplift-speed-bin missing\n");
  4629. return rc;
  4630. }
  4631. if (cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel)
  4632. && (uplift_speed_bin == cpr_vreg->speed_bin)
  4633. && !(cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE)) {
  4634. cpr_vreg->flags |= FLAGS_UPLIFT_QUOT_VOLT;
  4635. }
  4636. }
  4637. return 0;
  4638. }
  4639. /*
  4640. * Read in the number of fuse corners and then allocate memory for arrays that
  4641. * are sized based upon the number of fuse corners.
  4642. */
  4643. static int cpr_fuse_corner_array_alloc(struct device *dev,
  4644. struct cpr_regulator *cpr_vreg)
  4645. {
  4646. int rc;
  4647. size_t len;
  4648. rc = of_property_read_u32(dev->of_node, "qcom,cpr-fuse-corners",
  4649. &cpr_vreg->num_fuse_corners);
  4650. if (rc < 0) {
  4651. cpr_err(cpr_vreg, "qcom,cpr-fuse-corners missing: rc=%d\n", rc);
  4652. return rc;
  4653. }
  4654. if (cpr_vreg->num_fuse_corners < CPR_FUSE_CORNER_MIN
  4655. || cpr_vreg->num_fuse_corners > CPR_FUSE_CORNER_LIMIT) {
  4656. cpr_err(cpr_vreg, "corner count=%d is invalid\n",
  4657. cpr_vreg->num_fuse_corners);
  4658. return -EINVAL;
  4659. }
  4660. /*
  4661. * The arrays sized based on the fuse corner count ignore element 0
  4662. * in order to simplify indexing throughout the driver since min_uV = 0
  4663. * cannot be passed into a set_voltage() callback.
  4664. */
  4665. len = cpr_vreg->num_fuse_corners + 1;
  4666. cpr_vreg->pvs_corner_v = devm_kzalloc(dev,
  4667. len * sizeof(*cpr_vreg->pvs_corner_v), GFP_KERNEL);
  4668. cpr_vreg->cpr_fuse_target_quot = devm_kzalloc(dev,
  4669. len * sizeof(*cpr_vreg->cpr_fuse_target_quot), GFP_KERNEL);
  4670. cpr_vreg->cpr_fuse_ro_sel = devm_kzalloc(dev,
  4671. len * sizeof(*cpr_vreg->cpr_fuse_ro_sel), GFP_KERNEL);
  4672. cpr_vreg->fuse_ceiling_volt = devm_kzalloc(dev,
  4673. len * (sizeof(*cpr_vreg->fuse_ceiling_volt)), GFP_KERNEL);
  4674. cpr_vreg->fuse_floor_volt = devm_kzalloc(dev,
  4675. len * (sizeof(*cpr_vreg->fuse_floor_volt)), GFP_KERNEL);
  4676. cpr_vreg->step_quotient = devm_kzalloc(dev,
  4677. len * sizeof(*cpr_vreg->step_quotient), GFP_KERNEL);
  4678. if (cpr_vreg->pvs_corner_v == NULL || cpr_vreg->cpr_fuse_ro_sel == NULL
  4679. || cpr_vreg->fuse_ceiling_volt == NULL
  4680. || cpr_vreg->fuse_floor_volt == NULL
  4681. || cpr_vreg->cpr_fuse_target_quot == NULL
  4682. || cpr_vreg->step_quotient == NULL) {
  4683. cpr_err(cpr_vreg, "Could not allocate memory for CPR arrays\n");
  4684. return -ENOMEM;
  4685. }
  4686. return 0;
  4687. }
  4688. static int cpr_voltage_plan_init(struct platform_device *pdev,
  4689. struct cpr_regulator *cpr_vreg)
  4690. {
  4691. struct device_node *of_node = pdev->dev.of_node;
  4692. int rc, i;
  4693. u32 min_uv = 0;
  4694. rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-ceiling",
  4695. &cpr_vreg->fuse_ceiling_volt[CPR_FUSE_CORNER_MIN],
  4696. cpr_vreg->num_fuse_corners);
  4697. if (rc < 0) {
  4698. cpr_err(cpr_vreg, "cpr-voltage-ceiling missing: rc=%d\n", rc);
  4699. return rc;
  4700. }
  4701. rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-floor",
  4702. &cpr_vreg->fuse_floor_volt[CPR_FUSE_CORNER_MIN],
  4703. cpr_vreg->num_fuse_corners);
  4704. if (rc < 0) {
  4705. cpr_err(cpr_vreg, "cpr-voltage-floor missing: rc=%d\n", rc);
  4706. return rc;
  4707. }
  4708. cpr_parse_cond_min_volt_fuse(cpr_vreg, of_node);
  4709. rc = cpr_voltage_uplift_enable_check(cpr_vreg, of_node);
  4710. if (rc < 0) {
  4711. cpr_err(cpr_vreg, "voltage uplift enable check failed, %d\n",
  4712. rc);
  4713. return rc;
  4714. }
  4715. if (cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE) {
  4716. of_property_read_u32(of_node, "qcom,cpr-cond-min-voltage",
  4717. &min_uv);
  4718. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
  4719. i++)
  4720. if (cpr_vreg->fuse_ceiling_volt[i] < min_uv) {
  4721. cpr_vreg->fuse_ceiling_volt[i] = min_uv;
  4722. cpr_vreg->fuse_floor_volt[i] = min_uv;
  4723. } else if (cpr_vreg->fuse_floor_volt[i] < min_uv) {
  4724. cpr_vreg->fuse_floor_volt[i] = min_uv;
  4725. }
  4726. }
  4727. return 0;
  4728. }
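/*
 * Illustrative voltage plan for the two properties read above (hypothetical
 * values in uV, one entry per fuse corner):
 *
 *	qcom,cpr-voltage-ceiling = <1050000 1150000 1280000>;
 *	qcom,cpr-voltage-floor = <900000 950000 1050000>;
 *
 * When FLAGS_SET_MIN_VOLTAGE is set, the loop above raises both arrays to
 * at least qcom,cpr-cond-min-voltage.
 */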
  4729. static int cpr_mem_acc_init(struct platform_device *pdev,
  4730. struct cpr_regulator *cpr_vreg)
  4731. {
  4732. int rc, size;
  4733. struct property *prop;
  4734. char *corner_map_str;
  4735. if (of_find_property(pdev->dev.of_node, "mem-acc-supply", NULL)) {
  4736. cpr_vreg->mem_acc_vreg = devm_regulator_get(&pdev->dev,
  4737. "mem-acc");
  4738. if (IS_ERR_OR_NULL(cpr_vreg->mem_acc_vreg)) {
  4739. rc = PTR_RET(cpr_vreg->mem_acc_vreg);
  4740. if (rc != -EPROBE_DEFER)
  4741. cpr_err(cpr_vreg,
  4742. "devm_regulator_get: mem-acc: rc=%d\n",
  4743. rc);
  4744. return rc;
  4745. }
  4746. }
  4747. corner_map_str = "qcom,mem-acc-corner-map";
  4748. prop = of_find_property(pdev->dev.of_node, corner_map_str, NULL);
  4749. if (!prop) {
  4750. corner_map_str = "qcom,cpr-corner-map";
  4751. prop = of_find_property(pdev->dev.of_node, corner_map_str,
  4752. NULL);
  4753. if (!prop) {
  4754. cpr_err(cpr_vreg, "qcom,cpr-corner-map missing\n");
  4755. return -EINVAL;
  4756. }
  4757. }
4758. size = prop->length / sizeof(u32);
4759. cpr_vreg->mem_acc_corner_map = devm_kzalloc(&pdev->dev,
4760. sizeof(int) * (size + 1),
4761. GFP_KERNEL);
if (!cpr_vreg->mem_acc_corner_map)
return -ENOMEM;
  4762. rc = of_property_read_u32_array(pdev->dev.of_node, corner_map_str,
  4763. &cpr_vreg->mem_acc_corner_map[CPR_FUSE_CORNER_MIN],
  4764. size);
  4765. if (rc) {
  4766. cpr_err(cpr_vreg, "%s missing, rc = %d\n", corner_map_str, rc);
  4767. return rc;
  4768. }
  4769. return 0;
  4770. }
  4771. #if defined(CONFIG_DEBUG_FS)
  4772. static int cpr_enable_set(void *data, u64 val)
  4773. {
  4774. struct cpr_regulator *cpr_vreg = data;
  4775. bool old_cpr_enable;
  4776. mutex_lock(&cpr_vreg->cpr_mutex);
  4777. old_cpr_enable = cpr_vreg->enable;
  4778. cpr_vreg->enable = val;
  4779. if (old_cpr_enable == cpr_vreg->enable)
  4780. goto _exit;
  4781. if (cpr_vreg->enable && cpr_vreg->cpr_fuse_disable) {
  4782. cpr_info(cpr_vreg,
  4783. "CPR permanently disabled due to fuse values\n");
  4784. cpr_vreg->enable = false;
  4785. goto _exit;
  4786. }
  4787. cpr_debug(cpr_vreg, "%s CPR [corner=%d, fuse_corner=%d]\n",
  4788. cpr_vreg->enable ? "enabling" : "disabling",
  4789. cpr_vreg->corner, cpr_vreg->corner_map[cpr_vreg->corner]);
  4790. if (cpr_vreg->corner) {
  4791. if (cpr_vreg->enable) {
  4792. cpr_ctl_disable(cpr_vreg);
  4793. cpr_irq_clr(cpr_vreg);
  4794. cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
  4795. cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
  4796. } else {
  4797. cpr_ctl_disable(cpr_vreg);
  4798. cpr_irq_set(cpr_vreg, 0);
  4799. }
  4800. }
  4801. _exit:
  4802. mutex_unlock(&cpr_vreg->cpr_mutex);
  4803. return 0;
  4804. }
  4805. static int cpr_enable_get(void *data, u64 *val)
  4806. {
  4807. struct cpr_regulator *cpr_vreg = data;
  4808. *val = cpr_vreg->enable;
  4809. return 0;
  4810. }
  4811. DEFINE_SIMPLE_ATTRIBUTE(cpr_enable_fops, cpr_enable_get, cpr_enable_set,
  4812. "%llu\n");
  4813. static int cpr_get_cpr_ceiling(void *data, u64 *val)
  4814. {
  4815. struct cpr_regulator *cpr_vreg = data;
  4816. *val = cpr_vreg->ceiling_volt[cpr_vreg->corner];
  4817. return 0;
  4818. }
  4819. DEFINE_SIMPLE_ATTRIBUTE(cpr_ceiling_fops, cpr_get_cpr_ceiling, NULL,
  4820. "%llu\n");
  4821. static int cpr_get_cpr_floor(void *data, u64 *val)
  4822. {
  4823. struct cpr_regulator *cpr_vreg = data;
  4824. *val = cpr_vreg->floor_volt[cpr_vreg->corner];
  4825. return 0;
  4826. }
  4827. DEFINE_SIMPLE_ATTRIBUTE(cpr_floor_fops, cpr_get_cpr_floor, NULL,
  4828. "%llu\n");
  4829. static int cpr_get_cpr_max_ceiling(void *data, u64 *val)
  4830. {
  4831. struct cpr_regulator *cpr_vreg = data;
  4832. *val = cpr_vreg->cpr_max_ceiling[cpr_vreg->corner];
  4833. return 0;
  4834. }
  4835. DEFINE_SIMPLE_ATTRIBUTE(cpr_max_ceiling_fops, cpr_get_cpr_max_ceiling, NULL,
  4836. "%llu\n");
  4837. static int cpr_debug_info_open(struct inode *inode, struct file *file)
  4838. {
  4839. file->private_data = inode->i_private;
  4840. return 0;
  4841. }
  4842. static ssize_t cpr_debug_info_read(struct file *file, char __user *buff,
  4843. size_t count, loff_t *ppos)
  4844. {
  4845. struct cpr_regulator *cpr_vreg = file->private_data;
  4846. char *debugfs_buf;
  4847. ssize_t len, ret = 0;
  4848. u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
  4849. u32 step_dn, step_up, error, error_lt0, busy;
  4850. int fuse_corner;
  4851. debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  4852. if (!debugfs_buf)
  4853. return -ENOMEM;
  4854. mutex_lock(&cpr_vreg->cpr_mutex);
  4855. fuse_corner = cpr_vreg->corner_map[cpr_vreg->corner];
  4856. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4857. "corner = %d, current_volt = %d uV\n",
  4858. cpr_vreg->corner, cpr_vreg->last_volt[cpr_vreg->corner]);
  4859. ret += len;
  4860. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4861. "fuse_corner = %d, current_volt = %d uV\n",
  4862. fuse_corner, cpr_vreg->last_volt[cpr_vreg->corner]);
  4863. ret += len;
  4864. ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
  4865. gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel));
  4866. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4867. "rbcpr_gcnt_target (%u) = 0x%02X\n", ro_sel, gcnt);
  4868. ret += len;
  4869. ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
  4870. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4871. "rbcpr_ctl = 0x%02X\n", ctl);
  4872. ret += len;
  4873. irq_status = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
  4874. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4875. "rbcpr_irq_status = 0x%02X\n", irq_status);
  4876. ret += len;
  4877. reg = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
  4878. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4879. "rbcpr_result_0 = 0x%02X\n", reg);
  4880. ret += len;
  4881. step_dn = reg & 0x01;
  4882. step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
  4883. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4884. " [step_dn = %u", step_dn);
  4885. ret += len;
  4886. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4887. ", step_up = %u", step_up);
  4888. ret += len;
  4889. error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
  4890. & RBCPR_RESULT0_ERROR_STEPS_MASK;
  4891. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4892. ", error_steps = %u", error_steps);
  4893. ret += len;
  4894. error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
  4895. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4896. ", error = %u", error);
  4897. ret += len;
  4898. error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
  4899. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4900. ", error_lt_0 = %u", error_lt0);
  4901. ret += len;
  4902. busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
  4903. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4904. ", busy = %u]\n", busy);
  4905. ret += len;
  4906. mutex_unlock(&cpr_vreg->cpr_mutex);
  4907. ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
  4908. kfree(debugfs_buf);
  4909. return ret;
  4910. }
  4911. static const struct file_operations cpr_debug_info_fops = {
  4912. .open = cpr_debug_info_open,
  4913. .read = cpr_debug_info_read,
  4914. };
  4915. static int cpr_aging_debug_info_open(struct inode *inode, struct file *file)
  4916. {
  4917. file->private_data = inode->i_private;
  4918. return 0;
  4919. }
  4920. static ssize_t cpr_aging_debug_info_read(struct file *file, char __user *buff,
  4921. size_t count, loff_t *ppos)
  4922. {
  4923. struct cpr_regulator *cpr_vreg = file->private_data;
  4924. struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
  4925. char *debugfs_buf;
  4926. ssize_t len, ret = 0;
  4927. int i;
  4928. debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  4929. if (!debugfs_buf)
  4930. return -ENOMEM;
  4931. mutex_lock(&cpr_vreg->cpr_mutex);
  4932. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4933. "aging_adj_volt = [");
  4934. ret += len;
  4935. for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
  4936. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4937. " %d", aging_info->voltage_adjust[i]);
  4938. ret += len;
  4939. }
  4940. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4941. " ]uV\n");
  4942. ret += len;
  4943. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4944. "aging_measurement_done = %s\n",
  4945. aging_info->cpr_aging_done ? "true" : "false");
  4946. ret += len;
  4947. len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
  4948. "aging_measurement_error = %s\n",
  4949. aging_info->cpr_aging_error ? "true" : "false");
  4950. ret += len;
  4951. mutex_unlock(&cpr_vreg->cpr_mutex);
  4952. ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
  4953. kfree(debugfs_buf);
  4954. return ret;
  4955. }
  4956. static const struct file_operations cpr_aging_debug_info_fops = {
  4957. .open = cpr_aging_debug_info_open,
  4958. .read = cpr_aging_debug_info_read,
  4959. };
  4960. static void cpr_debugfs_init(struct cpr_regulator *cpr_vreg)
  4961. {
  4962. struct dentry *temp;
  4963. if (IS_ERR_OR_NULL(cpr_debugfs_base)) {
  4964. cpr_err(cpr_vreg, "Could not create debugfs nodes since base directory is missing\n");
  4965. return;
  4966. }
  4967. cpr_vreg->debugfs = debugfs_create_dir(cpr_vreg->rdesc.name,
  4968. cpr_debugfs_base);
  4969. if (IS_ERR_OR_NULL(cpr_vreg->debugfs)) {
  4970. cpr_err(cpr_vreg, "debugfs directory creation failed\n");
  4971. return;
  4972. }
  4973. temp = debugfs_create_file("debug_info", S_IRUGO, cpr_vreg->debugfs,
  4974. cpr_vreg, &cpr_debug_info_fops);
  4975. if (IS_ERR_OR_NULL(temp)) {
  4976. cpr_err(cpr_vreg, "debug_info node creation failed\n");
  4977. return;
  4978. }
  4979. temp = debugfs_create_file("cpr_enable", S_IRUGO | S_IWUSR,
  4980. cpr_vreg->debugfs, cpr_vreg, &cpr_enable_fops);
  4981. if (IS_ERR_OR_NULL(temp)) {
  4982. cpr_err(cpr_vreg, "cpr_enable node creation failed\n");
  4983. return;
  4984. }
  4985. temp = debugfs_create_file("cpr_ceiling", S_IRUGO,
  4986. cpr_vreg->debugfs, cpr_vreg, &cpr_ceiling_fops);
  4987. if (IS_ERR_OR_NULL(temp)) {
  4988. cpr_err(cpr_vreg, "cpr_ceiling node creation failed\n");
  4989. return;
  4990. }
  4991. temp = debugfs_create_file("cpr_floor", S_IRUGO,
  4992. cpr_vreg->debugfs, cpr_vreg, &cpr_floor_fops);
  4993. if (IS_ERR_OR_NULL(temp)) {
  4994. cpr_err(cpr_vreg, "cpr_floor node creation failed\n");
  4995. return;
  4996. }
  4997. temp = debugfs_create_file("cpr_max_ceiling", S_IRUGO,
  4998. cpr_vreg->debugfs, cpr_vreg, &cpr_max_ceiling_fops);
  4999. if (IS_ERR_OR_NULL(temp)) {
  5000. cpr_err(cpr_vreg, "cpr_max_ceiling node creation failed\n");
  5001. return;
  5002. }
  5003. if (cpr_vreg->aging_info) {
  5004. temp = debugfs_create_file("aging_debug_info", S_IRUGO,
  5005. cpr_vreg->debugfs, cpr_vreg,
  5006. &cpr_aging_debug_info_fops);
  5007. if (IS_ERR_OR_NULL(temp)) {
  5008. cpr_err(cpr_vreg, "aging_debug_info node creation failed\n");
  5009. return;
  5010. }
  5011. }
  5012. }
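/*
 * Usage sketch for the debugfs nodes created above, assuming debugfs is
 * mounted at /sys/kernel/debug and the regulator is named "apc_corner"
 * (both are assumptions for illustration, not guaranteed by this driver):
 *
 *	cat /sys/kernel/debug/cpr-regulator/apc_corner/debug_info
 *	cat /sys/kernel/debug/cpr-regulator/apc_corner/cpr_ceiling
 *	echo 0 > /sys/kernel/debug/cpr-regulator/apc_corner/cpr_enable
 */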
  5013. static void cpr_debugfs_remove(struct cpr_regulator *cpr_vreg)
  5014. {
  5015. debugfs_remove_recursive(cpr_vreg->debugfs);
  5016. }
  5017. static void cpr_debugfs_base_init(void)
  5018. {
  5019. cpr_debugfs_base = debugfs_create_dir("cpr-regulator", NULL);
  5020. if (IS_ERR_OR_NULL(cpr_debugfs_base))
  5021. pr_err("cpr-regulator debugfs base directory creation failed\n");
  5022. }
  5023. static void cpr_debugfs_base_remove(void)
  5024. {
  5025. debugfs_remove_recursive(cpr_debugfs_base);
  5026. }
  5027. #else
  5028. static void cpr_debugfs_init(struct cpr_regulator *cpr_vreg)
  5029. {}
  5030. static void cpr_debugfs_remove(struct cpr_regulator *cpr_vreg)
  5031. {}
  5032. static void cpr_debugfs_base_init(void)
  5033. {}
  5034. static void cpr_debugfs_base_remove(void)
  5035. {}
  5036. #endif
  5037. /**
  5038. * cpr_panic_callback() - panic notification callback function. This function
  5039. * is invoked when a kernel panic occurs.
  5040. * @nfb: Notifier block pointer of CPR regulator
  5041. * @event: Value passed unmodified to notifier function
  5042. * @data: Pointer passed unmodified to notifier function
  5043. *
  5044. * Return: NOTIFY_OK
  5045. */
  5046. static int cpr_panic_callback(struct notifier_block *nfb,
  5047. unsigned long event, void *data)
  5048. {
  5049. struct cpr_regulator *cpr_vreg = container_of(nfb,
  5050. struct cpr_regulator, panic_notifier);
  5051. int corner, fuse_corner, volt;
  5052. corner = cpr_vreg->corner;
  5053. fuse_corner = cpr_vreg->corner_map[corner];
  5054. if (cpr_is_allowed(cpr_vreg))
  5055. volt = cpr_vreg->last_volt[corner];
  5056. else
  5057. volt = cpr_vreg->open_loop_volt[corner];
  5058. cpr_err(cpr_vreg, "[corner:%d, fuse_corner:%d] = %d uV\n",
  5059. corner, fuse_corner, volt);
  5060. return NOTIFY_OK;
  5061. }
  5062. static int cpr_regulator_probe(struct platform_device *pdev)
  5063. {
  5064. struct regulator_config reg_config = {};
  5065. struct cpr_regulator *cpr_vreg;
  5066. struct regulator_desc *rdesc;
  5067. struct device *dev = &pdev->dev;
  5068. struct regulator_init_data *init_data = pdev->dev.platform_data;
  5069. int rc;
  5070. if (!pdev->dev.of_node) {
  5071. dev_err(dev, "Device tree node is missing\n");
  5072. return -EINVAL;
  5073. }
  5074. cpr_vreg = devm_kzalloc(&pdev->dev, sizeof(struct cpr_regulator),
  5075. GFP_KERNEL);
  5076. if (!cpr_vreg)
  5077. return -ENOMEM;
  5078. init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
  5079. &cpr_vreg->rdesc);
  5080. if (!init_data) {
  5081. dev_err(dev, "regulator init data is missing\n");
  5082. return -EINVAL;
  5083. } else {
  5084. init_data->constraints.input_uV
  5085. = init_data->constraints.max_uV;
  5086. init_data->constraints.valid_ops_mask
  5087. |= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
  5088. }
  5089. cpr_vreg->rdesc.name = init_data->constraints.name;
  5090. if (cpr_vreg->rdesc.name == NULL) {
  5091. dev_err(dev, "regulator-name missing\n");
  5092. return -EINVAL;
  5093. }
  5094. rc = cpr_fuse_corner_array_alloc(&pdev->dev, cpr_vreg);
  5095. if (rc)
  5096. return rc;
  5097. rc = cpr_mem_acc_init(pdev, cpr_vreg);
  5098. if (rc) {
5099. cpr_err(cpr_vreg, "mem_acc initialization error rc=%d\n", rc);
  5100. return rc;
  5101. }
  5102. rc = cpr_efuse_init(pdev, cpr_vreg);
  5103. if (rc) {
  5104. cpr_err(cpr_vreg, "Wrong eFuse address specified: rc=%d\n", rc);
  5105. return rc;
  5106. }
  5107. rc = cpr_remap_efuse_data(pdev, cpr_vreg);
  5108. if (rc) {
  5109. cpr_err(cpr_vreg, "Could not remap fuse data: rc=%d\n", rc);
5110. goto err_out;
  5111. }
  5112. rc = cpr_check_redundant(pdev, cpr_vreg);
  5113. if (rc) {
  5114. cpr_err(cpr_vreg, "Could not check redundant fuse: rc=%d\n",
  5115. rc);
  5116. goto err_out;
  5117. }
  5118. rc = cpr_read_fuse_revision(pdev, cpr_vreg);
  5119. if (rc) {
  5120. cpr_err(cpr_vreg, "Could not read fuse revision: rc=%d\n", rc);
  5121. goto err_out;
  5122. }
  5123. cpr_parse_speed_bin_fuse(cpr_vreg, dev->of_node);
  5124. cpr_parse_pvs_version_fuse(cpr_vreg, dev->of_node);
  5125. rc = cpr_read_ro_select(pdev, cpr_vreg);
  5126. if (rc) {
  5127. cpr_err(cpr_vreg, "Could not read RO select: rc=%d\n", rc);
  5128. goto err_out;
  5129. }
  5130. rc = cpr_find_fuse_map_match(pdev, cpr_vreg);
  5131. if (rc) {
  5132. cpr_err(cpr_vreg, "Could not determine fuse mapping match: rc=%d\n",
  5133. rc);
  5134. goto err_out;
  5135. }
  5136. rc = cpr_voltage_plan_init(pdev, cpr_vreg);
  5137. if (rc) {
  5138. cpr_err(cpr_vreg, "Wrong DT parameter specified: rc=%d\n", rc);
  5139. goto err_out;
  5140. }
  5141. rc = cpr_pvs_init(pdev, cpr_vreg);
  5142. if (rc) {
5143. cpr_err(cpr_vreg, "PVS initialization failed: rc=%d\n", rc);
  5144. goto err_out;
  5145. }
  5146. rc = cpr_vsens_init(pdev, cpr_vreg);
  5147. if (rc) {
  5148. cpr_err(cpr_vreg, "Initialize vsens configuration failed rc=%d\n",
  5149. rc);
5150. goto err_out;
  5151. }
  5152. rc = cpr_apc_init(pdev, cpr_vreg);
  5153. if (rc) {
  5154. if (rc != -EPROBE_DEFER)
5155. cpr_err(cpr_vreg, "APC initialization failed: rc=%d\n", rc);
  5156. goto err_out;
  5157. }
  5158. rc = cpr_init_cpr(pdev, cpr_vreg);
  5159. if (rc) {
  5160. cpr_err(cpr_vreg, "Initialize CPR failed: rc=%d\n", rc);
  5161. goto err_out;
  5162. }
  5163. rc = cpr_rpm_apc_init(pdev, cpr_vreg);
  5164. if (rc) {
  5165. cpr_err(cpr_vreg, "Initialize RPM APC regulator failed rc=%d\n",
  5166. rc);
5167. goto err_out;
  5168. }
  5169. if (of_property_read_bool(pdev->dev.of_node,
  5170. "qcom,disable-closed-loop-in-pc")) {
  5171. rc = cpr_init_pm_notification(cpr_vreg);
  5172. if (rc) {
  5173. cpr_err(cpr_vreg,
  5174. "cpr_init_pm_notification failed rc=%d\n", rc);
5175. goto err_out;
  5176. }
  5177. }
  5178. /* Load per-online CPU adjustment data */
  5179. rc = cpr_init_per_cpu_adjustments(cpr_vreg, &pdev->dev);
  5180. if (rc) {
  5181. cpr_err(cpr_vreg, "cpr_init_per_cpu_adjustments failed: rc=%d\n",
  5182. rc);
  5183. goto err_out;
  5184. }
  5185. /* Parse dependency parameters */
  5186. if (cpr_vreg->vdd_mx) {
  5187. rc = cpr_parse_vdd_mx_parameters(pdev, cpr_vreg);
  5188. if (rc) {
  5189. cpr_err(cpr_vreg, "parsing vdd_mx parameters failed: rc=%d\n",
  5190. rc);
  5191. goto err_out;
  5192. }
  5193. }
  5194. cpr_efuse_free(cpr_vreg);
  5195. /*
  5196. * Ensure that enable state accurately reflects the case in which CPR
  5197. * is permanently disabled.
  5198. */
  5199. cpr_vreg->enable &= !cpr_vreg->cpr_fuse_disable;
  5200. mutex_init(&cpr_vreg->cpr_mutex);
  5201. rdesc = &cpr_vreg->rdesc;
  5202. rdesc->owner = THIS_MODULE;
  5203. rdesc->type = REGULATOR_VOLTAGE;
  5204. rdesc->ops = &cpr_corner_ops;
  5205. reg_config.dev = &pdev->dev;
  5206. reg_config.init_data = init_data;
  5207. reg_config.driver_data = cpr_vreg;
  5208. reg_config.of_node = pdev->dev.of_node;
  5209. cpr_vreg->rdev = regulator_register(rdesc, &reg_config);
  5210. if (IS_ERR(cpr_vreg->rdev)) {
  5211. rc = PTR_ERR(cpr_vreg->rdev);
  5212. cpr_err(cpr_vreg, "regulator_register failed: rc=%d\n", rc);
  5213. cpr_apc_exit(cpr_vreg);
  5214. return rc;
  5215. }
  5216. platform_set_drvdata(pdev, cpr_vreg);
  5217. cpr_debugfs_init(cpr_vreg);
  5218. /* Register panic notification call back */
  5219. cpr_vreg->panic_notifier.notifier_call = cpr_panic_callback;
  5220. atomic_notifier_chain_register(&panic_notifier_list,
  5221. &cpr_vreg->panic_notifier);
  5222. mutex_lock(&cpr_regulator_list_mutex);
  5223. list_add(&cpr_vreg->list, &cpr_regulator_list);
  5224. mutex_unlock(&cpr_regulator_list_mutex);
  5225. return 0;
  5226. err_out:
  5227. cpr_efuse_free(cpr_vreg);
  5228. return rc;
  5229. }
  5230. static int cpr_regulator_remove(struct platform_device *pdev)
  5231. {
  5232. struct cpr_regulator *cpr_vreg;
  5233. cpr_vreg = platform_get_drvdata(pdev);
  5234. if (cpr_vreg) {
  5235. /* Disable CPR */
  5236. if (cpr_is_allowed(cpr_vreg)) {
  5237. cpr_ctl_disable(cpr_vreg);
  5238. cpr_irq_set(cpr_vreg, 0);
  5239. }
  5240. mutex_lock(&cpr_regulator_list_mutex);
  5241. list_del(&cpr_vreg->list);
  5242. mutex_unlock(&cpr_regulator_list_mutex);
  5243. if (cpr_vreg->cpu_notifier.notifier_call)
  5244. unregister_hotcpu_notifier(&cpr_vreg->cpu_notifier);
  5245. atomic_notifier_chain_unregister(&panic_notifier_list,
  5246. &cpr_vreg->panic_notifier);
  5247. cpr_apc_exit(cpr_vreg);
  5248. cpr_debugfs_remove(cpr_vreg);
  5249. regulator_unregister(cpr_vreg->rdev);
  5250. }
  5251. return 0;
  5252. }
  5253. static struct of_device_id cpr_regulator_match_table[] = {
  5254. { .compatible = CPR_REGULATOR_DRIVER_NAME, },
  5255. {}
  5256. };
  5257. static struct platform_driver cpr_regulator_driver = {
  5258. .driver = {
  5259. .name = CPR_REGULATOR_DRIVER_NAME,
  5260. .of_match_table = cpr_regulator_match_table,
  5261. .owner = THIS_MODULE,
  5262. },
  5263. .probe = cpr_regulator_probe,
  5264. .remove = cpr_regulator_remove,
  5265. .suspend = cpr_regulator_suspend,
  5266. .resume = cpr_regulator_resume,
  5267. };
  5268. /**
  5269. * cpr_regulator_init() - register cpr-regulator driver
  5270. *
  5271. * This initialization function should be called in systems in which driver
  5272. * registration ordering must be controlled precisely.
  5273. */
  5274. int __init cpr_regulator_init(void)
  5275. {
  5276. static bool initialized;
  5277. if (initialized)
  5278. return 0;
  5279. else
  5280. initialized = true;
  5281. cpr_debugfs_base_init();
  5282. return platform_driver_register(&cpr_regulator_driver);
  5283. }
  5284. EXPORT_SYMBOL(cpr_regulator_init);
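/*
 * Minimal sketch of an early caller for platforms that must guarantee this
 * driver registers before its consumers probe; the function name and call
 * site are illustrative assumptions only. Repeated calls are harmless
 * because of the 'initialized' guard above.
 *
 *	static int __init example_soc_regulator_init(void)
 *	{
 *		return cpr_regulator_init();
 *	}
 *	arch_initcall(example_soc_regulator_init);
 */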
  5285. static void __exit cpr_regulator_exit(void)
  5286. {
  5287. platform_driver_unregister(&cpr_regulator_driver);
  5288. cpr_debugfs_base_remove();
  5289. }
  5290. MODULE_DESCRIPTION("CPR regulator driver");
  5291. MODULE_LICENSE("GPL v2");
  5292. arch_initcall(cpr_regulator_init);
  5293. module_exit(cpr_regulator_exit);