- /*
- * QTI Crypto driver
- *
- * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
- #include <linux/module.h>
- #include <linux/clk.h>
- #include <linux/cpu.h>
- #include <linux/types.h>
- #include <linux/platform_device.h>
- #include <linux/dma-mapping.h>
- #include <linux/dmapool.h>
- #include <linux/crypto.h>
- #include <linux/kernel.h>
- #include <linux/rtnetlink.h>
- #include <linux/interrupt.h>
- #include <linux/spinlock.h>
- #include <linux/llist.h>
- #include <linux/debugfs.h>
- #include <linux/workqueue.h>
- #include <linux/sched.h>
- #include <linux/init.h>
- #include <linux/cache.h>
- #include <linux/platform_data/qcom_crypto_device.h>
- #include <linux/msm-bus.h>
- #include <linux/hardirq.h>
- #include <linux/qcrypto.h>
- #include <crypto/ctr.h>
- #include <crypto/des.h>
- #include <crypto/aes.h>
- #include <crypto/sha.h>
- #include <crypto/hash.h>
- #include <crypto/algapi.h>
- #include <crypto/aead.h>
- #include <crypto/authenc.h>
- #include <crypto/scatterwalk.h>
- #include <crypto/skcipher.h>
- #include <crypto/internal/skcipher.h>
- #include <crypto/internal/hash.h>
- #include <crypto/internal/aead.h>
- #include <linux/fips_status.h>
- #include "qce.h"
- #define DEBUG_MAX_FNAME 16
- #define DEBUG_MAX_RW_BUF 4096
- #define QCRYPTO_BIG_NUMBER 9999999 /* a big number */
- /*
- * For crypto 5.0 which has burst size alignment requirement.
- */
- #define MAX_ALIGN_SIZE 0x40
- #define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
- /* Status of response workq */
- enum resp_workq_sts {
- NOT_SCHEDULED = 0,
- IS_SCHEDULED = 1,
- SCHEDULE_AGAIN = 2
- };
- /* Status of req processing by CEs */
- enum req_processing_sts {
- STOPPED = 0,
- IN_PROGRESS = 1
- };
- enum qcrypto_bus_state {
- BUS_NO_BANDWIDTH = 0,
- BUS_HAS_BANDWIDTH,
- BUS_BANDWIDTH_RELEASING,
- BUS_BANDWIDTH_ALLOCATING,
- BUS_SUSPENDED,
- BUS_SUSPENDING,
- };
- struct crypto_stat {
- u64 aead_sha1_aes_enc;
- u64 aead_sha1_aes_dec;
- u64 aead_sha1_des_enc;
- u64 aead_sha1_des_dec;
- u64 aead_sha1_3des_enc;
- u64 aead_sha1_3des_dec;
- u64 aead_sha256_aes_enc;
- u64 aead_sha256_aes_dec;
- u64 aead_sha256_des_enc;
- u64 aead_sha256_des_dec;
- u64 aead_sha256_3des_enc;
- u64 aead_sha256_3des_dec;
- u64 aead_ccm_aes_enc;
- u64 aead_ccm_aes_dec;
- u64 aead_rfc4309_ccm_aes_enc;
- u64 aead_rfc4309_ccm_aes_dec;
- u64 aead_op_success;
- u64 aead_op_fail;
- u64 aead_bad_msg;
- u64 ablk_cipher_aes_enc;
- u64 ablk_cipher_aes_dec;
- u64 ablk_cipher_des_enc;
- u64 ablk_cipher_des_dec;
- u64 ablk_cipher_3des_enc;
- u64 ablk_cipher_3des_dec;
- u64 ablk_cipher_op_success;
- u64 ablk_cipher_op_fail;
- u64 sha1_digest;
- u64 sha256_digest;
- u64 sha1_hmac_digest;
- u64 sha256_hmac_digest;
- u64 ahash_op_success;
- u64 ahash_op_fail;
- };
- static struct crypto_stat _qcrypto_stat;
- static struct dentry *_debug_dent;
- static char _debug_read_buf[DEBUG_MAX_RW_BUF];
- static bool _qcrypto_init_assign;
- struct crypto_priv;
- struct qcrypto_req_control {
- unsigned int index;
- bool in_use;
- struct crypto_engine *pce;
- struct crypto_async_request *req;
- struct qcrypto_resp_ctx *arsp;
- int res; /* execution result */
- };
- struct crypto_engine {
- struct list_head elist;
- void *qce; /* qce handle */
- struct platform_device *pdev; /* platform device */
- struct crypto_priv *pcp;
- uint32_t bus_scale_handle;
- struct crypto_queue req_queue; /*
- * request queue for those requests
- * that have this engine assigned
- * waiting to be executed
- */
- u64 total_req;
- u64 err_req;
- u32 unit;
- u32 ce_device;
- u32 ce_hw_instance;
- unsigned int signature;
- enum qcrypto_bus_state bw_state;
- bool high_bw_req;
- struct timer_list bw_reaper_timer;
- struct work_struct bw_reaper_ws;
- struct work_struct bw_allocate_ws;
- /* engine execution sequence number */
- u32 active_seq;
- /* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */
- u32 last_active_seq;
- bool check_flag;
- /* Added to support multi-requests */
- unsigned int max_req;
- struct qcrypto_req_control *preq_pool;
- atomic_t req_count;
- bool issue_req; /* a request is being issued to qce */
- bool first_engine; /* this engine is the first engine or not */
- unsigned int irq_cpu; /* the cpu running the irq of this engine */
- unsigned int max_req_used; /* debug stats */
- };
- #define MAX_SMP_CPU 8
- struct crypto_priv {
- /* CE features supported by target device*/
- struct msm_ce_hw_support platform_support;
- /* CE features/algorithms supported by HW engine*/
- struct ce_hw_support ce_support;
- /* the lock protects crypto queue and req */
- spinlock_t lock;
- /* list of registered algorithms */
- struct list_head alg_list;
- /* current active request */
- struct crypto_async_request *req;
- struct work_struct unlock_ce_ws;
- struct list_head engine_list; /* list of qcrypto engines */
- int32_t total_units; /* total units of engines */
- struct mutex engine_lock;
- struct crypto_engine *next_engine; /* next assign engine */
- struct crypto_queue req_queue; /*
- * request queue for those requests
- * that waiting for an available
- * engine.
- */
- struct llist_head ordered_resp_list; /* Queue to maintain
- * responses in sequence.
- */
- atomic_t resp_cnt;
- struct workqueue_struct *resp_wq;
- struct work_struct resp_work; /*
- * Workq to send responses
- * in sequence.
- */
- enum resp_workq_sts sched_resp_workq_status;
- enum req_processing_sts ce_req_proc_sts;
- int cpu_getting_irqs_frm_first_ce;
- struct crypto_engine *first_engine;
- struct crypto_engine *scheduled_eng; /* last engine scheduled */
- /* debug stats */
- unsigned int no_avail;
- unsigned int resp_stop;
- unsigned int resp_start;
- unsigned int max_qlen;
- unsigned int queue_work_eng3;
- unsigned int queue_work_not_eng3;
- unsigned int queue_work_not_eng3_nz;
- unsigned int max_resp_qlen;
- unsigned int max_reorder_cnt;
- unsigned int cpu_req[MAX_SMP_CPU+1];
- };
- static struct crypto_priv qcrypto_dev;
- static struct crypto_engine *_qcrypto_static_assign_engine(
- struct crypto_priv *cp);
- static struct crypto_engine *_avail_eng(struct crypto_priv *cp);
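- /* Claim a free slot from the engine's pre-allocated request pool; returns NULL when all pce->max_req slots are in use. */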
- static struct qcrypto_req_control *qcrypto_alloc_req_control(
- struct crypto_engine *pce)
- {
- int i;
- struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
- unsigned int req_count;
- for (i = 0; i < pce->max_req; i++) {
- if (xchg(&pqcrypto_req_control->in_use, true) == false) {
- req_count = atomic_inc_return(&pce->req_count);
- if (req_count > pce->max_req_used)
- pce->max_req_used = req_count;
- return pqcrypto_req_control;
- }
- pqcrypto_req_control++;
- }
- return NULL;
- }
- static void qcrypto_free_req_control(struct crypto_engine *pce,
- struct qcrypto_req_control *preq)
- {
- /* do this before free req */
- preq->req = NULL;
- preq->arsp = NULL;
- /* free req */
- if (xchg(&preq->in_use, false) == false)
- pr_warn("request info %pK free already\n", preq);
- else
- atomic_dec(&pce->req_count);
- }
- static struct qcrypto_req_control *find_req_control_for_areq(
- struct crypto_engine *pce,
- struct crypto_async_request *areq)
- {
- int i;
- struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
- for (i = 0; i < pce->max_req; i++) {
- if (pqcrypto_req_control->req == areq)
- return pqcrypto_req_control;
- pqcrypto_req_control++;
- }
- return NULL;
- }
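- /* Set up the per-engine request pool: record the pool pointer, zero the in-flight count, and mark every slot free. */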
- static void qcrypto_init_req_control(struct crypto_engine *pce,
- struct qcrypto_req_control *pqcrypto_req_control)
- {
- int i;
- pce->preq_pool = pqcrypto_req_control;
- atomic_set(&pce->req_count, 0);
- for (i = 0; i < pce->max_req; i++) {
- pqcrypto_req_control->index = i;
- pqcrypto_req_control->in_use = false;
- pqcrypto_req_control->pce = pce;
- pqcrypto_req_control++;
- }
- }
- static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp,
- unsigned int device)
- {
- struct crypto_engine *entry = NULL;
- unsigned long flags;
- spin_lock_irqsave(&cp->lock, flags);
- list_for_each_entry(entry, &cp->engine_list, elist) {
- if (entry->ce_device == device)
- break;
- }
- spin_unlock_irqrestore(&cp->lock, flags);
- if (((entry != NULL) && (entry->ce_device != device)) ||
- (entry == NULL)) {
- pr_err("Device node for CE device %d NOT FOUND!!\n",
- device);
- return NULL;
- }
- return entry;
- }
- static struct crypto_engine *_qrypto_find_pengine_device_hw
- (struct crypto_priv *cp,
- u32 device,
- u32 hw_instance)
- {
- struct crypto_engine *entry = NULL;
- unsigned long flags;
- spin_lock_irqsave(&cp->lock, flags);
- list_for_each_entry(entry, &cp->engine_list, elist) {
- if ((entry->ce_device == device) &&
- (entry->ce_hw_instance == hw_instance))
- break;
- }
- spin_unlock_irqrestore(&cp->lock, flags);
- if (((entry != NULL) &&
- ((entry->ce_device != device)
- || (entry->ce_hw_instance != hw_instance)))
- || (entry == NULL)) {
- pr_err("Device node for CE device %d NOT FOUND!!\n",
- device);
- return NULL;
- }
- return entry;
- }
- int qcrypto_get_num_engines(void)
- {
- struct crypto_priv *cp = &qcrypto_dev;
- struct crypto_engine *entry = NULL;
- int count = 0;
- list_for_each_entry(entry, &cp->engine_list, elist) {
- count++;
- }
- return count;
- }
- EXPORT_SYMBOL(qcrypto_get_num_engines);
- void qcrypto_get_engine_list(size_t num_engines,
- struct crypto_engine_entry *arr)
- {
- struct crypto_priv *cp = &qcrypto_dev;
- struct crypto_engine *entry = NULL;
- size_t arr_index = 0;
- list_for_each_entry(entry, &cp->engine_list, elist) {
- arr[arr_index].ce_device = entry->ce_device;
- arr[arr_index].hw_instance = entry->ce_hw_instance;
- arr_index++;
- if (arr_index >= num_engines)
- break;
- }
- }
- EXPORT_SYMBOL(qcrypto_get_engine_list);
- enum qcrypto_alg_type {
- QCRYPTO_ALG_CIPHER = 0,
- QCRYPTO_ALG_SHA = 1,
- QCRYPTO_ALG_AEAD = 2,
- QCRYPTO_ALG_LAST
- };
- struct qcrypto_alg {
- struct list_head entry;
- struct crypto_alg cipher_alg;
- struct ahash_alg sha_alg;
- struct aead_alg aead_alg;
- enum qcrypto_alg_type alg_type;
- struct crypto_priv *cp;
- };
- #define QCRYPTO_MAX_KEY_SIZE 64
- /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
- #define QCRYPTO_MAX_IV_LENGTH 16
- #define QCRYPTO_CCM4309_NONCE_LEN 3
- struct qcrypto_cipher_ctx {
- struct list_head rsp_queue; /* response queue */
- struct crypto_engine *pengine; /* fixed engine assigned to this tfm */
- struct crypto_priv *cp;
- unsigned int flags;
- enum qce_hash_alg_enum auth_alg; /* for aead */
- u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
- u8 iv[QCRYPTO_MAX_IV_LENGTH];
- u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
- unsigned int enc_key_len;
- unsigned int authsize;
- unsigned int auth_key_len;
- u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
- struct crypto_skcipher *cipher_aes192_fb;
- struct crypto_ahash *ahash_aead_aes192_fb;
- };
- struct qcrypto_resp_ctx {
- struct list_head list;
- struct llist_node llist;
- struct crypto_async_request *async_req; /* async req */
- int res; /* execution result */
- };
- struct qcrypto_cipher_req_ctx {
- struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
- struct crypto_engine *pengine; /* engine assigned to this request */
- u8 *iv;
- u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
- unsigned int ivsize;
- int aead;
- int ccmtype; /* default: 0, rfc4309: 1 */
- struct scatterlist asg; /* Formatted associated data sg */
- unsigned char *adata; /* Pointer to formatted assoc data */
- enum qce_cipher_alg_enum alg;
- enum qce_cipher_dir_enum dir;
- enum qce_cipher_mode_enum mode;
- struct scatterlist *orig_src; /* Original src sg ptr */
- struct scatterlist *orig_dst; /* Original dst sg ptr */
- struct scatterlist dsg; /* Dest Data sg */
- struct scatterlist ssg; /* Source Data sg */
- unsigned char *data; /* Incoming data pointer*/
- struct aead_request *aead_req;
- struct ahash_request *fb_hash_req;
- uint8_t fb_ahash_digest[SHA256_DIGEST_SIZE];
- struct scatterlist fb_ablkcipher_src_sg[2];
- struct scatterlist fb_ablkcipher_dst_sg[2];
- char *fb_aes_iv;
- unsigned int fb_ahash_length;
- struct skcipher_request *fb_aes_req;
- struct scatterlist *fb_aes_src;
- struct scatterlist *fb_aes_dst;
- unsigned int fb_aes_cryptlen;
- };
- #define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE
- #define SHA_MAX_STATE_SIZE (SHA256_DIGEST_SIZE / sizeof(u32))
- #define SHA_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE
- #define MSM_QCRYPTO_REQ_QUEUE_LENGTH 768
- #define COMPLETION_CB_BACKLOG_LENGTH_STOP 400
- #define COMPLETION_CB_BACKLOG_LENGTH_START \
- (COMPLETION_CB_BACKLOG_LENGTH_STOP / 2)
- static uint8_t _std_init_vector_sha1_uint8[] = {
- 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
- 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
- 0xC3, 0xD2, 0xE1, 0xF0
- };
- /* standard initialization vector for SHA-256, source: FIPS 180-2 */
- static uint8_t _std_init_vector_sha256_uint8[] = {
- 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
- 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
- 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
- 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
- };
- struct qcrypto_sha_ctx {
- struct list_head rsp_queue; /* response queue */
- struct crypto_engine *pengine; /* fixed engine assigned to this tfm */
- struct crypto_priv *cp;
- unsigned int flags;
- enum qce_hash_alg_enum alg;
- uint32_t diglen;
- uint32_t authkey_in_len;
- uint8_t authkey[SHA_MAX_BLOCK_SIZE];
- struct ahash_request *ahash_req;
- struct completion ahash_req_complete;
- };
- struct qcrypto_sha_req_ctx {
- struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
- struct crypto_engine *pengine; /* engine assigned to this request */
- struct scatterlist *src;
- uint32_t nbytes;
- struct scatterlist *orig_src; /* Original src sg ptr */
- struct scatterlist dsg; /* Data sg */
- unsigned char *data; /* Incoming data pointer*/
- unsigned char *data2; /* Updated data pointer*/
- uint32_t byte_count[4];
- u64 count;
- uint8_t first_blk;
- uint8_t last_blk;
- uint8_t trailing_buf[SHA_MAX_BLOCK_SIZE];
- uint32_t trailing_buf_len;
- /* dma buffer, Internal use */
- uint8_t staging_dmabuf
- [SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE];
- uint8_t digest[SHA_MAX_DIGEST_SIZE];
- struct scatterlist sg[2];
- };
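- /* Pack a big-endian byte stream into 32-bit words, handling a 1-3 byte tail. */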
- static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
- unsigned int len)
- {
- unsigned int n;
- n = len / sizeof(uint32_t);
- for (; n > 0; n--) {
- *iv = ((*b << 24) & 0xff000000) |
- (((*(b+1)) << 16) & 0xff0000) |
- (((*(b+2)) << 8) & 0xff00) |
- (*(b+3) & 0xff);
- b += sizeof(uint32_t);
- iv++;
- }
- n = len % sizeof(uint32_t);
- if (n == 3) {
- *iv = ((*b << 24) & 0xff000000) |
- (((*(b+1)) << 16) & 0xff0000) |
- (((*(b+2)) << 8) & 0xff00);
- } else if (n == 2) {
- *iv = ((*b << 24) & 0xff000000) |
- (((*(b+1)) << 16) & 0xff0000);
- } else if (n == 1) {
- *iv = ((*b << 24) & 0xff000000);
- }
- }
- static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
- unsigned int len)
- {
- unsigned int n = len / sizeof(uint32_t);
- for (; n > 0; n--) {
- *b++ = (unsigned char) ((*iv >> 24) & 0xff);
- *b++ = (unsigned char) ((*iv >> 16) & 0xff);
- *b++ = (unsigned char) ((*iv >> 8) & 0xff);
- *b++ = (unsigned char) (*iv & 0xff);
- iv++;
- }
- n = len % sizeof(uint32_t);
- if (n == 3) {
- *b++ = (unsigned char) ((*iv >> 24) & 0xff);
- *b++ = (unsigned char) ((*iv >> 16) & 0xff);
- *b = (unsigned char) ((*iv >> 8) & 0xff);
- } else if (n == 2) {
- *b++ = (unsigned char) ((*iv >> 24) & 0xff);
- *b = (unsigned char) ((*iv >> 16) & 0xff);
- } else if (n == 1) {
- *b = (unsigned char) ((*iv >> 24) & 0xff);
- }
- }
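- /* Vote or drop bus bandwidth and enable or disable the CE clock in the order the target requires (req_bw_before_clk), undoing the first step if the second fails. */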
- static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
- bool high_bw_req)
- {
- struct crypto_priv *cp = pengine->pcp;
- unsigned int control_flag;
- int ret = 0;
- if (cp->ce_support.req_bw_before_clk) {
- if (high_bw_req)
- control_flag = QCE_BW_REQUEST_FIRST;
- else
- control_flag = QCE_CLK_DISABLE_FIRST;
- } else {
- if (high_bw_req)
- control_flag = QCE_CLK_ENABLE_FIRST;
- else
- control_flag = QCE_BW_REQUEST_RESET_FIRST;
- }
- switch (control_flag) {
- case QCE_CLK_ENABLE_FIRST:
- ret = qce_enable_clk(pengine->qce);
- if (ret) {
- pr_err("%s Unable enable clk\n", __func__);
- return;
- }
- ret = msm_bus_scale_client_update_request(
- pengine->bus_scale_handle, 1);
- if (ret) {
- pr_err("%s Unable to set high bw\n", __func__);
- ret = qce_disable_clk(pengine->qce);
- if (ret)
- pr_err("%s Unable disable clk\n", __func__);
- return;
- }
- break;
- case QCE_BW_REQUEST_FIRST:
- ret = msm_bus_scale_client_update_request(
- pengine->bus_scale_handle, 1);
- if (ret) {
- pr_err("%s Unable to set high bw\n", __func__);
- return;
- }
- ret = qce_enable_clk(pengine->qce);
- if (ret) {
- pr_err("%s Unable enable clk\n", __func__);
- ret = msm_bus_scale_client_update_request(
- pengine->bus_scale_handle, 0);
- if (ret)
- pr_err("%s Unable to set low bw\n", __func__);
- return;
- }
- break;
- case QCE_CLK_DISABLE_FIRST:
- ret = qce_disable_clk(pengine->qce);
- if (ret) {
- pr_err("%s Unable to disable clk\n", __func__);
- return;
- }
- ret = msm_bus_scale_client_update_request(
- pengine->bus_scale_handle, 0);
- if (ret) {
- pr_err("%s Unable to set low bw\n", __func__);
- ret = qce_enable_clk(pengine->qce);
- if (ret)
- pr_err("%s Unable enable clk\n", __func__);
- return;
- }
- break;
- case QCE_BW_REQUEST_RESET_FIRST:
- ret = msm_bus_scale_client_update_request(
- pengine->bus_scale_handle, 0);
- if (ret) {
- pr_err("%s Unable to set low bw\n", __func__);
- return;
- }
- ret = qce_disable_clk(pengine->qce);
- if (ret) {
- pr_err("%s Unable to disable clk\n", __func__);
- ret = msm_bus_scale_client_update_request(
- pengine->bus_scale_handle, 1);
- if (ret)
- pr_err("%s Unable to set high bw\n", __func__);
- return;
- }
- break;
- default:
- return;
- }
- }
- static void qcrypto_bw_reaper_timer_callback(unsigned long data)
- {
- struct crypto_engine *pengine = (struct crypto_engine *)data;
- schedule_work(&pengine->bw_reaper_ws);
- }
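- /* (Re)arm the bandwidth reaper timer for another QCRYPTO_HIGH_BANDWIDTH_TIMEOUT ms. */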
- static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
- {
- pengine->bw_reaper_timer.data =
- (unsigned long)(pengine);
- pengine->bw_reaper_timer.expires = jiffies +
- msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
- mod_timer(&(pengine->bw_reaper_timer),
- pengine->bw_reaper_timer.expires);
- }
- static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine)
- {
- schedule_work(&pengine->bw_allocate_ws);
- }
- static int _start_qcrypto_process(struct crypto_priv *cp,
- struct crypto_engine *pengine);
- static void qcrypto_bw_allocate_work(struct work_struct *work)
- {
- struct crypto_engine *pengine = container_of(work,
- struct crypto_engine, bw_allocate_ws);
- unsigned long flags;
- struct crypto_priv *cp = pengine->pcp;
- spin_lock_irqsave(&cp->lock, flags);
- pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
- spin_unlock_irqrestore(&cp->lock, flags);
- qcrypto_ce_set_bus(pengine, true);
- qcrypto_bw_set_timeout(pengine);
- spin_lock_irqsave(&cp->lock, flags);
- pengine->bw_state = BUS_HAS_BANDWIDTH;
- pengine->high_bw_req = false;
- pengine->active_seq++;
- pengine->check_flag = true;
- spin_unlock_irqrestore(&cp->lock, flags);
- _start_qcrypto_process(cp, pengine);
- };
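- /* Reaper: if the engine has been idle for a full timeout window, release its bus vote; re-acquire it (and restart processing) if a request arrived while the clocks were going down. */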
- static void qcrypto_bw_reaper_work(struct work_struct *work)
- {
- struct crypto_engine *pengine = container_of(work,
- struct crypto_engine, bw_reaper_ws);
- struct crypto_priv *cp = pengine->pcp;
- unsigned long flags;
- u32 active_seq;
- bool restart = false;
- spin_lock_irqsave(&cp->lock, flags);
- active_seq = pengine->active_seq;
- if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
- (active_seq == pengine->last_active_seq)) {
- /* check if engine is stuck */
- if (atomic_read(&pengine->req_count) > 0) {
- if (pengine->check_flag)
- dev_warn(&pengine->pdev->dev,
- "The engine appears to be stuck seq %d.\n",
- active_seq);
- pengine->check_flag = false;
- goto ret;
- }
- if (cp->platform_support.bus_scale_table == NULL)
- goto ret;
- pengine->bw_state = BUS_BANDWIDTH_RELEASING;
- spin_unlock_irqrestore(&cp->lock, flags);
- qcrypto_ce_set_bus(pengine, false);
- spin_lock_irqsave(&cp->lock, flags);
- if (pengine->high_bw_req == true) {
- /* we got request while we are disabling clock */
- pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
- spin_unlock_irqrestore(&cp->lock, flags);
- qcrypto_ce_set_bus(pengine, true);
- spin_lock_irqsave(&cp->lock, flags);
- pengine->bw_state = BUS_HAS_BANDWIDTH;
- pengine->high_bw_req = false;
- restart = true;
- } else
- pengine->bw_state = BUS_NO_BANDWIDTH;
- }
- ret:
- pengine->last_active_seq = active_seq;
- spin_unlock_irqrestore(&cp->lock, flags);
- if (restart)
- _start_qcrypto_process(cp, pengine);
- if (pengine->bw_state != BUS_NO_BANDWIDTH)
- qcrypto_bw_set_timeout(pengine);
- }
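- /* Count the scatterlist entries needed to cover nbytes. */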
- static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
- {
- int i;
- for (i = 0; nbytes > 0 && sg != NULL; i++, sg = sg_next(sg))
- nbytes -= sg->length;
- return i;
- }
- static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl,
- unsigned int nents, void *buf, size_t buflen)
- {
- int i;
- size_t offset, len;
- for (i = 0, offset = 0; i < nents; ++i) {
- len = sg_copy_from_buffer(sgl, 1, buf, buflen);
- buf += len;
- buflen -= len;
- offset += len;
- sgl = sg_next(sgl);
- }
- return offset;
- }
- static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl,
- unsigned int nents, void *buf, size_t buflen)
- {
- int i;
- size_t offset, len;
- for (i = 0, offset = 0; i < nents; ++i) {
- len = sg_copy_to_buffer(sgl, 1, buf, buflen);
- buf += len;
- buflen -= len;
- offset += len;
- sgl = sg_next(sgl);
- }
- return offset;
- }
- static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
- struct ahash_alg *template)
- {
- struct qcrypto_alg *q_alg;
- q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
- if (!q_alg)
- return ERR_PTR(-ENOMEM);
- q_alg->alg_type = QCRYPTO_ALG_SHA;
- q_alg->sha_alg = *template;
- q_alg->cp = cp;
- return q_alg;
- };
- static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
- struct crypto_alg *template)
- {
- struct qcrypto_alg *q_alg;
- q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
- if (!q_alg)
- return ERR_PTR(-ENOMEM);
- q_alg->alg_type = QCRYPTO_ALG_CIPHER;
- q_alg->cipher_alg = *template;
- q_alg->cp = cp;
- return q_alg;
- };
- static struct qcrypto_alg *_qcrypto_aead_alg_alloc(struct crypto_priv *cp,
- struct aead_alg *template)
- {
- struct qcrypto_alg *q_alg;
- q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
- if (!q_alg)
- return ERR_PTR(-ENOMEM);
- q_alg->alg_type = QCRYPTO_ALG_AEAD;
- q_alg->aead_alg = *template;
- q_alg->cp = cp;
- return q_alg;
- };
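- /* Common cipher/aead context init: bind the context to the driver instance, seed a random first IV, and optionally pin it to a statically assigned engine. */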
- static int _qcrypto_cipher_ctx_init(struct qcrypto_cipher_ctx *ctx,
- struct qcrypto_alg *q_alg)
- {
- if (!ctx || !q_alg) {
- pr_err("ctx or q_alg is NULL\n");
- return -EINVAL;
- }
- ctx->flags = 0;
- /* update context with ptr to cp */
- ctx->cp = q_alg->cp;
- /* random first IV */
- get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
- if (_qcrypto_init_assign) {
- ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
- if (ctx->pengine == NULL)
- return -ENODEV;
- } else
- ctx->pengine = NULL;
- INIT_LIST_HEAD(&ctx->rsp_queue);
- ctx->auth_alg = QCE_HASH_LAST;
- return 0;
- }
- static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
- {
- struct crypto_alg *alg = tfm->__crt_alg;
- struct qcrypto_alg *q_alg;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
- return _qcrypto_cipher_ctx_init(ctx, q_alg);
- };
- static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
- {
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
- struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
- struct ahash_alg, halg);
- struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
- sha_alg);
- crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
- /* update context with ptr to cp */
- sha_ctx->cp = q_alg->cp;
- sha_ctx->flags = 0;
- sha_ctx->ahash_req = NULL;
- if (_qcrypto_init_assign) {
- sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
- if (sha_ctx->pengine == NULL)
- return -ENODEV;
- } else
- sha_ctx->pengine = NULL;
- INIT_LIST_HEAD(&sha_ctx->rsp_queue);
- return 0;
- };
- static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
- if (!list_empty(&sha_ctx->rsp_queue))
- pr_err("_qcrypto_ahash_cra_exit: requests still outstanding");
- if (sha_ctx->ahash_req != NULL) {
- ahash_request_free(sha_ctx->ahash_req);
- sha_ctx->ahash_req = NULL;
- }
- };
- static void _crypto_sha_hmac_ahash_req_complete(
- struct crypto_async_request *req, int err);
- static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
- {
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
- int ret = 0;
- ret = _qcrypto_ahash_cra_init(tfm);
- if (ret)
- return ret;
- sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
- if (sha_ctx->ahash_req == NULL) {
- _qcrypto_ahash_cra_exit(tfm);
- return -ENOMEM;
- }
- init_completion(&sha_ctx->ahash_req_complete);
- ahash_request_set_callback(sha_ctx->ahash_req,
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- _crypto_sha_hmac_ahash_req_complete,
- &sha_ctx->ahash_req_complete);
- crypto_ahash_clear_flags(ahash, ~0);
- return 0;
- };
- static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
- {
- tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
- return _qcrypto_cipher_cra_init(tfm);
- };
- static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm)
- {
- const char *name = tfm->__crt_alg->cra_name;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret;
- struct crypto_priv *cp = &qcrypto_dev;
- if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
- ctx->cipher_aes192_fb = NULL;
- return _qcrypto_cra_ablkcipher_init(tfm);
- }
- ctx->cipher_aes192_fb = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->cipher_aes192_fb)) {
- pr_err("Error allocating fallback algo %s\n", name);
- ret = PTR_ERR(ctx->cipher_aes192_fb);
- ctx->cipher_aes192_fb = NULL;
- return ret;
- }
- return _qcrypto_cra_ablkcipher_init(tfm);
- };
- static int _qcrypto_aead_cra_init(struct crypto_aead *tfm)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_alg *aeadalg = crypto_aead_alg(tfm);
- struct qcrypto_alg *q_alg = container_of(aeadalg, struct qcrypto_alg,
- aead_alg);
- return _qcrypto_cipher_ctx_init(ctx, q_alg);
- };
- static int _qcrypto_cra_aead_sha1_init(struct crypto_aead *tfm)
- {
- int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
- rc = _qcrypto_aead_cra_init(tfm);
- ctx->auth_alg = QCE_HASH_SHA1_HMAC;
- return rc;
- }
- static int _qcrypto_cra_aead_sha256_init(struct crypto_aead *tfm)
- {
- int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
- rc = _qcrypto_aead_cra_init(tfm);
- ctx->auth_alg = QCE_HASH_SHA256_HMAC;
- return rc;
- }
- static int _qcrypto_cra_aead_ccm_init(struct crypto_aead *tfm)
- {
- int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
- rc = _qcrypto_aead_cra_init(tfm);
- ctx->auth_alg = QCE_HASH_AES_CMAC;
- return rc;
- }
- static int _qcrypto_cra_aead_rfc4309_ccm_init(struct crypto_aead *tfm)
- {
- int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
- rc = _qcrypto_aead_cra_init(tfm);
- ctx->auth_alg = QCE_HASH_AES_CMAC;
- return rc;
- }
- static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_aead *tfm)
- {
- int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- struct crypto_priv *cp = &qcrypto_dev;
- crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
- rc = _qcrypto_aead_cra_init(tfm);
- if (rc)
- return rc;
- ctx->cipher_aes192_fb = NULL;
- ctx->ahash_aead_aes192_fb = NULL;
- if (!cp->ce_support.aes_key_192) {
- ctx->cipher_aes192_fb = crypto_alloc_skcipher(
- "cbc(aes)", 0, 0);
- if (IS_ERR(ctx->cipher_aes192_fb)) {
- ctx->cipher_aes192_fb = NULL;
- } else {
- ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
- "hmac(sha1)", 0, 0);
- if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
- ctx->ahash_aead_aes192_fb = NULL;
- crypto_free_skcipher(ctx->cipher_aes192_fb);
- ctx->cipher_aes192_fb = NULL;
- }
- }
- }
- ctx->auth_alg = QCE_HASH_SHA1_HMAC;
- return 0;
- }
- static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_aead *tfm)
- {
- int rc;
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- struct crypto_priv *cp = &qcrypto_dev;
- crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
- rc = _qcrypto_aead_cra_init(tfm);
- if (rc)
- return rc;
- ctx->cipher_aes192_fb = NULL;
- ctx->ahash_aead_aes192_fb = NULL;
- if (!cp->ce_support.aes_key_192) {
- ctx->cipher_aes192_fb = crypto_alloc_skcipher(
- "cbc(aes)", 0, 0);
- if (IS_ERR(ctx->cipher_aes192_fb)) {
- ctx->cipher_aes192_fb = NULL;
- } else {
- ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
- "hmac(sha256)", 0, 0);
- if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
- ctx->ahash_aead_aes192_fb = NULL;
- crypto_free_skcipher(ctx->cipher_aes192_fb);
- ctx->cipher_aes192_fb = NULL;
- }
- }
- }
- ctx->auth_alg = QCE_HASH_SHA256_HMAC;
- return 0;
- }
- static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!list_empty(&ctx->rsp_queue))
- pr_err("_qcrypto__cra_ablkcipher_exit: requests still outstanding");
- };
- static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- _qcrypto_cra_ablkcipher_exit(tfm);
- if (ctx->cipher_aes192_fb)
- crypto_free_skcipher(ctx->cipher_aes192_fb);
- ctx->cipher_aes192_fb = NULL;
- }
- static void _qcrypto_cra_aead_exit(struct crypto_aead *tfm)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- if (!list_empty(&ctx->rsp_queue))
- pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
- }
- static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- if (!list_empty(&ctx->rsp_queue))
- pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
- if (ctx->cipher_aes192_fb)
- crypto_free_skcipher(ctx->cipher_aes192_fb);
- if (ctx->ahash_aead_aes192_fb)
- crypto_free_ahash(ctx->ahash_aead_aes192_fb);
- ctx->cipher_aes192_fb = NULL;
- ctx->ahash_aead_aes192_fb = NULL;
- }
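- /* Render the global driver statistics and per-engine counters into the debugfs read buffer. */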
- static int _disp_stats(int id)
- {
- struct crypto_stat *pstat;
- int len = 0;
- unsigned long flags;
- struct crypto_priv *cp = &qcrypto_dev;
- struct crypto_engine *pe;
- int i;
- pstat = &_qcrypto_stat;
- len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
- "\nQTI crypto accelerator %d Statistics\n",
- id + 1);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER AES encryption : %llu\n",
- pstat->ablk_cipher_aes_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER AES decryption : %llu\n",
- pstat->ablk_cipher_aes_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER DES encryption : %llu\n",
- pstat->ablk_cipher_des_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER DES decryption : %llu\n",
- pstat->ablk_cipher_des_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER 3DES encryption : %llu\n",
- pstat->ablk_cipher_3des_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER 3DES decryption : %llu\n",
- pstat->ablk_cipher_3des_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER operation success : %llu\n",
- pstat->ablk_cipher_op_success);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " ABLK CIPHER operation fail : %llu\n",
- pstat->ablk_cipher_op_fail);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- "\n");
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-AES encryption : %llu\n",
- pstat->aead_sha1_aes_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-AES decryption : %llu\n",
- pstat->aead_sha1_aes_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-DES encryption : %llu\n",
- pstat->aead_sha1_des_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-DES decryption : %llu\n",
- pstat->aead_sha1_des_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-3DES encryption : %llu\n",
- pstat->aead_sha1_3des_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA1-3DES decryption : %llu\n",
- pstat->aead_sha1_3des_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA256-AES encryption : %llu\n",
- pstat->aead_sha256_aes_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA256-AES decryption : %llu\n",
- pstat->aead_sha256_aes_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA256-DES encryption : %llu\n",
- pstat->aead_sha256_des_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA256-DES decryption : %llu\n",
- pstat->aead_sha256_des_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA256-3DES encryption : %llu\n",
- pstat->aead_sha256_3des_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD SHA256-3DES decryption : %llu\n",
- pstat->aead_sha256_3des_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD CCM-AES encryption : %llu\n",
- pstat->aead_ccm_aes_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD CCM-AES decryption : %llu\n",
- pstat->aead_ccm_aes_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD RFC4309-CCM-AES encryption : %llu\n",
- pstat->aead_rfc4309_ccm_aes_enc);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD RFC4309-CCM-AES decryption : %llu\n",
- pstat->aead_rfc4309_ccm_aes_dec);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD operation success : %llu\n",
- pstat->aead_op_success);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD operation fail : %llu\n",
- pstat->aead_op_fail);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AEAD bad message : %llu\n",
- pstat->aead_bad_msg);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- "\n");
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AHASH SHA1 digest : %llu\n",
- pstat->sha1_digest);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AHASH SHA256 digest : %llu\n",
- pstat->sha256_digest);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AHASH SHA1 HMAC digest : %llu\n",
- pstat->sha1_hmac_digest);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AHASH SHA256 HMAC digest : %llu\n",
- pstat->sha256_hmac_digest);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AHASH operation success : %llu\n",
- pstat->ahash_op_success);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " AHASH operation fail : %llu\n",
- pstat->ahash_op_fail);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " resp start, resp stop, max rsp queue reorder-cnt : %u %u %u %u\n",
- cp->resp_start, cp->resp_stop,
- cp->max_resp_qlen, cp->max_reorder_cnt);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " max queue legnth, no avail : %u %u\n",
- cp->max_qlen, cp->no_avail);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- " work queue : %u %u %u\n",
- cp->queue_work_eng3,
- cp->queue_work_not_eng3,
- cp->queue_work_not_eng3_nz);
- len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
- "\n");
- spin_lock_irqsave(&cp->lock, flags);
- list_for_each_entry(pe, &cp->engine_list, elist) {
- len += scnprintf(
- _debug_read_buf + len,
- DEBUG_MAX_RW_BUF - len - 1,
- " Engine %4d Req max %d : %llu\n",
- pe->unit,
- pe->max_req_used,
- pe->total_req
- );
- len += scnprintf(
- _debug_read_buf + len,
- DEBUG_MAX_RW_BUF - len - 1,
- " Engine %4d Req Error : %llu\n",
- pe->unit,
- pe->err_req
- );
- qce_get_driver_stats(pe->qce);
- }
- spin_unlock_irqrestore(&cp->lock, flags);
- for (i = 0; i < MAX_SMP_CPU+1; i++)
- if (cp->cpu_req[i])
- len += scnprintf(
- _debug_read_buf + len,
- DEBUG_MAX_RW_BUF - len - 1,
- "CPU %d Issue Req : %d\n",
- i, cp->cpu_req[i]);
- return len;
- }
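- /*
- * Detach an engine from the driver: drop it from the engine list,
- * hand the "first engine" role to the next entry if needed, cancel
- * its bandwidth work and timer, release its bus-scaling client and
- * request pool, and, once the last engine is gone, unregister every
- * algorithm this driver registered.
- */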
- static void _qcrypto_remove_engine(struct crypto_engine *pengine)
- {
- struct crypto_priv *cp;
- struct qcrypto_alg *q_alg;
- struct qcrypto_alg *n;
- unsigned long flags;
- struct crypto_engine *pe;
- cp = pengine->pcp;
- spin_lock_irqsave(&cp->lock, flags);
- list_del(&pengine->elist);
- if (pengine->first_engine) {
- cp->first_engine = NULL;
- pe = list_first_entry(&cp->engine_list, struct crypto_engine,
- elist);
- if (pe) {
- pe->first_engine = true;
- cp->first_engine = pe;
- }
- }
- if (cp->next_engine == pengine)
- cp->next_engine = NULL;
- if (cp->scheduled_eng == pengine)
- cp->scheduled_eng = NULL;
- spin_unlock_irqrestore(&cp->lock, flags);
- cp->total_units--;
- cancel_work_sync(&pengine->bw_reaper_ws);
- cancel_work_sync(&pengine->bw_allocate_ws);
- del_timer_sync(&pengine->bw_reaper_timer);
- if (pengine->bus_scale_handle != 0)
- msm_bus_scale_unregister_client(pengine->bus_scale_handle);
- pengine->bus_scale_handle = 0;
- kzfree(pengine->preq_pool);
- if (cp->total_units)
- return;
- list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
- if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
- crypto_unregister_alg(&q_alg->cipher_alg);
- if (q_alg->alg_type == QCRYPTO_ALG_SHA)
- crypto_unregister_ahash(&q_alg->sha_alg);
- if (q_alg->alg_type == QCRYPTO_ALG_AEAD)
- crypto_unregister_aead(&q_alg->aead_alg);
- list_del(&q_alg->entry);
- kzfree(q_alg);
- }
- }
- static int _qcrypto_remove(struct platform_device *pdev)
- {
- struct crypto_engine *pengine;
- struct crypto_priv *cp;
- pengine = platform_get_drvdata(pdev);
- if (!pengine)
- return 0;
- cp = pengine->pcp;
- mutex_lock(&cp->engine_lock);
- _qcrypto_remove_engine(pengine);
- mutex_unlock(&cp->engine_lock);
- if (pengine->qce)
- qce_close(pengine->qce);
- kzfree(pengine);
- return 0;
- }
- static int _qcrypto_check_aes_keylen(struct crypto_ablkcipher *cipher,
- struct crypto_priv *cp, unsigned int len)
- {
- switch (len) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_256:
- break;
- case AES_KEYSIZE_192:
- if (cp->ce_support.aes_key_192)
- break;
- default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- };
- return 0;
- }
- static int _qcrypto_setkey_aes_192_fallback(struct crypto_ablkcipher *cipher,
- const u8 *key)
- {
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret;
- ctx->enc_key_len = AES_KEYSIZE_192;
- ctx->cipher_aes192_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->cipher_aes192_fb->base.crt_flags |=
- (cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->cipher_aes192_fb, key,
- AES_KEYSIZE_192);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (cipher->base.crt_flags & CRYPTO_TFM_RES_MASK);
- }
- return ret;
- }
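- /*
- * AES setkey: hardware-backed keys are left untouched; AES-192 keys
- * are routed to the software fallback tfm when the crypto engine
- * lacks 192-bit key support; otherwise the key length is validated
- * and the key is copied into the context.
- */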
- static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int len)
- {
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_priv *cp = ctx->cp;
- if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
- return 0;
- if ((len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192)
- && ctx->cipher_aes192_fb)
- return _qcrypto_setkey_aes_192_fallback(cipher, key);
- if (_qcrypto_check_aes_keylen(cipher, cp, len))
- return -EINVAL;
- ctx->enc_key_len = len;
- if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
- if (key != NULL) {
- memcpy(ctx->enc_key, key, len);
- } else {
- pr_err("%s Inavlid key pointer\n", __func__);
- return -EINVAL;
- }
- }
- return 0;
- };
- static int _qcrypto_setkey_aes_xts(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int len)
- {
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_priv *cp = ctx->cp;
- if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
- return 0;
- if (_qcrypto_check_aes_keylen(cipher, cp, len/2))
- return -EINVAL;
- ctx->enc_key_len = len;
- if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
- if (key != NULL) {
- memcpy(ctx->enc_key, key, len);
- } else {
- pr_err("%s Inavlid key pointer\n", __func__);
- return -EINVAL;
- }
- }
- return 0;
- };
- static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int len)
- {
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 tmp[DES_EXPKEY_WORDS];
- int ret;
- if (!key) {
- pr_err("%s Inavlid key pointer\n", __func__);
- return -EINVAL;
- }
- ret = des_ekey(tmp, key);
- if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
- pr_err("%s HW KEY usage not supported for DES algorithm\n",
- __func__);
- return 0;
- };
- if (len != DES_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- };
- if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
- ctx->enc_key_len = len;
- if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))
- memcpy(ctx->enc_key, key, len);
- return 0;
- };
- static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int len)
- {
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
- pr_err("%s HW KEY usage not supported for 3DES algorithm\n",
- __func__);
- return 0;
- };
- if (len != DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- };
- ctx->enc_key_len = len;
- if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
- if (key != NULL) {
- memcpy(ctx->enc_key, key, len);
- } else {
- pr_err("%s Inavlid key pointer\n", __func__);
- return -EINVAL;
- }
- }
- return 0;
- };
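- /*
- * Response work function. llist_del_all() returns completion entries
- * in LIFO order, so the list is first reversed to restore submission
- * order before each completion callback is invoked. Once the backlog
- * drains below COMPLETION_CB_BACKLOG_LENGTH_START, request processing
- * is restarted on any available engine.
- */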
- static void seq_response(struct work_struct *work)
- {
- struct crypto_priv *cp = container_of(work, struct crypto_priv,
- resp_work);
- struct llist_node *list;
- struct llist_node *rev = NULL;
- struct crypto_engine *pengine;
- unsigned long flags;
- int total_unit;
- again:
- list = llist_del_all(&cp->ordered_resp_list);
- if (!list)
- goto end;
- while (list) {
- struct llist_node *t = list;
- list = llist_next(list);
- t->next = rev;
- rev = t;
- }
- while (rev) {
- struct qcrypto_resp_ctx *arsp;
- struct crypto_async_request *areq;
- arsp = container_of(rev, struct qcrypto_resp_ctx, llist);
- rev = llist_next(rev);
- areq = arsp->async_req;
- local_bh_disable();
- areq->complete(areq, arsp->res);
- local_bh_enable();
- atomic_dec(&cp->resp_cnt);
- }
- if (atomic_read(&cp->resp_cnt) < COMPLETION_CB_BACKLOG_LENGTH_START &&
- (cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS)
- == STOPPED)) {
- cp->resp_start++;
- for (total_unit = cp->total_units; total_unit-- > 0;) {
- spin_lock_irqsave(&cp->lock, flags);
- pengine = _avail_eng(cp);
- spin_unlock_irqrestore(&cp->lock, flags);
- if (pengine)
- _start_qcrypto_process(cp, pengine);
- else
- break;
- }
- }
- end:
- if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN,
- IS_SCHEDULED) == SCHEDULE_AGAIN)
- goto again;
- else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
- NOT_SCHEDULED) == SCHEDULE_AGAIN)
- goto end;
- }
- #define SCHEDULE_RSP_QLEN_THRESHOLD 64
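- /*
- * Move completed responses from the tfm's private rsp_queue onto the
- * global ordered_resp_list, preserving per-tfm ordering (entries still
- * marked -EINPROGRESS stop the scan), then schedule the response
- * workqueue. Completions arriving from secondary engines are deferred
- * while the first engine still has outstanding requests, unless the
- * response queue grows past the threshold above.
- */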
- static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type,
- void *tfm_ctx,
- struct qcrypto_resp_ctx *cur_arsp,
- int res)
- {
- struct crypto_priv *cp = pengine->pcp;
- unsigned long flags;
- struct qcrypto_resp_ctx *arsp;
- struct list_head *plist;
- unsigned int resp_qlen;
- unsigned int cnt = 0;
- switch (type) {
- case CRYPTO_ALG_TYPE_AHASH:
- plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- case CRYPTO_ALG_TYPE_AEAD:
- default:
- plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue;
- break;
- }
- spin_lock_irqsave(&cp->lock, flags);
- cur_arsp->res = res;
- while (!list_empty(plist)) {
- arsp = list_first_entry(plist,
- struct qcrypto_resp_ctx, list);
- if (arsp->res == -EINPROGRESS)
- break;
- list_del(&arsp->list);
- llist_add(&arsp->llist, &cp->ordered_resp_list);
- atomic_inc(&cp->resp_cnt);
- cnt++;
- }
- resp_qlen = atomic_read(&cp->resp_cnt);
- if (resp_qlen > cp->max_resp_qlen)
- cp->max_resp_qlen = resp_qlen;
- if (cnt > cp->max_reorder_cnt)
- cp->max_reorder_cnt = cnt;
- if ((resp_qlen >= COMPLETION_CB_BACKLOG_LENGTH_STOP) &&
- cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS,
- STOPPED) == IN_PROGRESS) {
- cp->resp_stop++;
- }
- spin_unlock_irqrestore(&cp->lock, flags);
- retry:
- if (!llist_empty(&cp->ordered_resp_list)) {
- unsigned int cpu;
- if (pengine->first_engine) {
- cpu = WORK_CPU_UNBOUND;
- cp->queue_work_eng3++;
- } else {
- cp->queue_work_not_eng3++;
- cpu = cp->cpu_getting_irqs_frm_first_ce;
- /*
- * If the source is not the first engine and there are
- * outstanding requests on the first engine, skip
- * scheduling the work queue in anticipation that more
- * completions may be coming. If the response queue
- * length exceeds the threshold, schedule the work queue
- * immediately to avoid further delay.
- */
- if (cp->first_engine && atomic_read(
- &cp->first_engine->req_count)) {
- if (resp_qlen < SCHEDULE_RSP_QLEN_THRESHOLD)
- return;
- cp->queue_work_not_eng3_nz++;
- }
- }
- if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED,
- IS_SCHEDULED) == NOT_SCHEDULED)
- queue_work_on(cpu, cp->resp_wq, &cp->resp_work);
- else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
- SCHEDULE_AGAIN) == NOT_SCHEDULED)
- goto retry;
- }
- }
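- /*
- * Common completion path for all request types: release the request
- * control block, record which CPU took the interrupt, hand the result
- * to _qcrypto_tfm_complete(), and try to issue the next queued request
- * if processing has not been throttled.
- */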
- static void req_done(struct qcrypto_req_control *pqcrypto_req_control)
- {
- struct crypto_engine *pengine;
- struct crypto_async_request *areq;
- struct crypto_priv *cp;
- struct qcrypto_resp_ctx *arsp;
- u32 type = 0;
- void *tfm_ctx = NULL;
- unsigned int cpu;
- int res;
- pengine = pqcrypto_req_control->pce;
- cp = pengine->pcp;
- areq = pqcrypto_req_control->req;
- arsp = pqcrypto_req_control->arsp;
- res = pqcrypto_req_control->res;
- qcrypto_free_req_control(pengine, pqcrypto_req_control);
- if (areq) {
- type = crypto_tfm_alg_type(areq->tfm);
- tfm_ctx = crypto_tfm_ctx(areq->tfm);
- }
- cpu = smp_processor_id();
- pengine->irq_cpu = cpu;
- if (pengine->first_engine) {
- if (cpu != cp->cpu_getting_irqs_frm_first_ce)
- cp->cpu_getting_irqs_frm_first_ce = cpu;
- }
- if (areq)
- _qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, res);
- if (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS)
- _start_qcrypto_process(cp, pengine);
- }
- static void _qce_ahash_complete(void *cookie, unsigned char *digest,
- unsigned char *authdata, int ret)
- {
- struct ahash_request *areq = (struct ahash_request *) cookie;
- struct crypto_async_request *async_req;
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
- struct crypto_priv *cp = sha_ctx->cp;
- struct crypto_stat *pstat;
- uint32_t diglen = crypto_ahash_digestsize(ahash);
- uint32_t *auth32 = (uint32_t *)authdata;
- struct crypto_engine *pengine;
- struct qcrypto_req_control *pqcrypto_req_control;
- async_req = &areq->base;
- pstat = &_qcrypto_stat;
- pengine = rctx->pengine;
- pqcrypto_req_control = find_req_control_for_areq(pengine,
- async_req);
- if (pqcrypto_req_control == NULL) {
- pr_err("async request not found\n");
- return;
- }
- #ifdef QCRYPTO_DEBUG
- dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %pK ret %d\n",
- areq, ret);
- #endif
- if (digest) {
- memcpy(rctx->digest, digest, diglen);
- if (rctx->last_blk)
- memcpy(areq->result, digest, diglen);
- }
- if (authdata) {
- rctx->byte_count[0] = auth32[0];
- rctx->byte_count[1] = auth32[1];
- rctx->byte_count[2] = auth32[2];
- rctx->byte_count[3] = auth32[3];
- }
- areq->src = rctx->src;
- areq->nbytes = rctx->nbytes;
- rctx->last_blk = 0;
- rctx->first_blk = 0;
- if (ret) {
- pqcrypto_req_control->res = -ENXIO;
- pstat->ahash_op_fail++;
- } else {
- pqcrypto_req_control->res = 0;
- pstat->ahash_op_success++;
- }
- if (cp->ce_support.aligned_only) {
- areq->src = rctx->orig_src;
- kfree(rctx->data);
- }
- req_done(pqcrypto_req_control);
- };
- static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
- unsigned char *iv, int ret)
- {
- struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
- struct crypto_async_request *async_req;
- struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- struct qcrypto_cipher_req_ctx *rctx;
- struct crypto_engine *pengine;
- struct qcrypto_req_control *pqcrypto_req_control;
- async_req = &areq->base;
- pstat = &_qcrypto_stat;
- rctx = ablkcipher_request_ctx(areq);
- pengine = rctx->pengine;
- pqcrypto_req_control = find_req_control_for_areq(pengine,
- async_req);
- if (pqcrypto_req_control == NULL) {
- pr_err("async request not found\n");
- return;
- }
- #ifdef QCRYPTO_DEBUG
- dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %pK ret %d\n",
- areq, ret);
- #endif
- if (iv)
- memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
- if (ret) {
- pqcrypto_req_control->res = -ENXIO;
- pstat->ablk_cipher_op_fail++;
- } else {
- pqcrypto_req_control->res = 0;
- pstat->ablk_cipher_op_success++;
- }
- if (cp->ce_support.aligned_only) {
- struct qcrypto_cipher_req_ctx *rctx;
- uint32_t num_sg = 0;
- uint32_t bytes = 0;
- rctx = ablkcipher_request_ctx(areq);
- areq->src = rctx->orig_src;
- areq->dst = rctx->orig_dst;
- num_sg = qcrypto_count_sg(areq->dst, areq->nbytes);
- bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
- rctx->data, areq->nbytes);
- if (bytes != areq->nbytes)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
- areq->nbytes);
- kzfree(rctx->data);
- }
- req_done(pqcrypto_req_control);
- };
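- /*
- * AEAD completion callback from the QCE layer. For non-CCM modes the
- * ICV is either appended to the destination (encrypt) or compared
- * against the value carried in the source (decrypt), with a mismatch
- * reported as -EBADMSG.
- */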
- static void _qce_aead_complete(void *cookie, unsigned char *icv,
- unsigned char *iv, int ret)
- {
- struct aead_request *areq = (struct aead_request *) cookie;
- struct crypto_async_request *async_req;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
- struct qcrypto_cipher_req_ctx *rctx;
- struct crypto_stat *pstat;
- struct crypto_engine *pengine;
- struct qcrypto_req_control *pqcrypto_req_control;
- async_req = &areq->base;
- pstat = &_qcrypto_stat;
- rctx = aead_request_ctx(areq);
- pengine = rctx->pengine;
- pqcrypto_req_control = find_req_control_for_areq(pengine,
- async_req);
- if (pqcrypto_req_control == NULL) {
- pr_err("async request not found\n");
- return;
- }
- if (rctx->mode == QCE_MODE_CCM) {
- kzfree(rctx->adata);
- } else {
- uint32_t ivsize = crypto_aead_ivsize(aead);
- if (ret == 0) {
- if (rctx->dir == QCE_ENCRYPT) {
- /* copy the icv to dst */
- scatterwalk_map_and_copy(icv, areq->dst,
- areq->cryptlen + areq->assoclen,
- ctx->authsize, 1);
- } else {
- unsigned char tmp[SHA256_DIGESTSIZE] = {0};
- /* compare icv from src */
- scatterwalk_map_and_copy(tmp,
- areq->src, areq->assoclen +
- areq->cryptlen - ctx->authsize,
- ctx->authsize, 0);
- ret = memcmp(icv, tmp, ctx->authsize);
- if (ret != 0)
- ret = -EBADMSG;
- }
- } else {
- ret = -ENXIO;
- }
- if (iv)
- memcpy(ctx->iv, iv, ivsize);
- }
- if (ret == (-EBADMSG))
- pstat->aead_bad_msg++;
- else if (ret)
- pstat->aead_op_fail++;
- else
- pstat->aead_op_success++;
- pqcrypto_req_control->res = ret;
- req_done(pqcrypto_req_control);
- }
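- /*
- * Encode the message length into the last csize bytes of the CCM B0
- * block, as required by RFC 3610; lengths that do not fit in the
- * available bytes are rejected with -EOVERFLOW.
- */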
- static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
- {
- __be32 data;
- memset(block, 0, csize);
- block += csize;
- if (csize >= 4)
- csize = 4;
- else if (msglen > (1 << (8 * csize)))
- return -EOVERFLOW;
- data = cpu_to_be32(msglen);
- memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
- return 0;
- }
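- /*
- * Build the CCM flags byte and nonce from the request IV per RFC 3610
- * and NIST SP 800-38C: encode (authsize - 2) / 2 in the flags, set the
- * Adata bit when associated data is present, and append the encoded
- * message length.
- */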
- static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq, uint32_t assoclen)
- {
- unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
- memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
- /*
- * Format control info per RFC 3610 and
- * NIST Special Publication 800-38C
- */
- qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
- if (assoclen)
- qreq->nonce[0] |= 64;
- if (i > MAX_NONCE)
- return -EINVAL;
- return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
- }
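- /*
- * Format the CCM associated-data length header (2, 6 or 10 bytes
- * depending on the associated-data length) and copy the scattered
- * associated data into the contiguous, 16-byte aligned buffer expected
- * by the engine.
- */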
- static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
- struct scatterlist *sg, unsigned char *adata)
- {
- uint32_t len;
- uint32_t bytes = 0;
- uint32_t num_sg = 0;
- /*
- * Add control info for associated data
- * RFC 3610 and NIST Special Publication 800-38C
- */
- if (alen < 65280) {
- *(__be16 *)adata = cpu_to_be16(alen);
- len = 2;
- } else {
- if ((alen >= 65280) && (alen <= 0xffffffff)) {
- *(__be16 *)adata = cpu_to_be16(0xfffe);
- *(__be32 *)&adata[2] = cpu_to_be32(alen);
- len = 6;
- } else {
- *(__be16 *)adata = cpu_to_be16(0xffff);
- *(__be32 *)&adata[6] = cpu_to_be32(alen);
- len = 10;
- }
- }
- adata += len;
- qreq->assoclen = ALIGN((alen + len), 16);
- num_sg = qcrypto_count_sg(sg, alen);
- bytes = qcrypto_sg_copy_to_buffer(sg, num_sg, adata, alen);
- if (bytes != alen)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes, alen);
- return 0;
- }
- static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
- struct qcrypto_req_control *pqcrypto_req_control)
- {
- struct crypto_async_request *async_req;
- struct qce_req qreq;
- int ret;
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *cipher_ctx;
- struct ablkcipher_request *req;
- struct crypto_ablkcipher *tfm;
- async_req = pqcrypto_req_control->req;
- req = container_of(async_req, struct ablkcipher_request, base);
- cipher_ctx = crypto_tfm_ctx(async_req->tfm);
- rctx = ablkcipher_request_ctx(req);
- rctx->pengine = pengine;
- tfm = crypto_ablkcipher_reqtfm(req);
- if (pengine->pcp->ce_support.aligned_only) {
- uint32_t bytes = 0;
- uint32_t num_sg = 0;
- rctx->orig_src = req->src;
- rctx->orig_dst = req->dst;
- rctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
- if (rctx->data == NULL)
- return -ENOMEM;
- num_sg = qcrypto_count_sg(req->src, req->nbytes);
- bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, rctx->data,
- req->nbytes);
- if (bytes != req->nbytes)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
- req->nbytes);
- sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
- sg_mark_end(&rctx->dsg);
- rctx->iv = req->info;
- req->src = &rctx->dsg;
- req->dst = &rctx->dsg;
- }
- qreq.op = QCE_REQ_ABLK_CIPHER;
- qreq.qce_cb = _qce_ablk_cipher_complete;
- qreq.areq = req;
- qreq.alg = rctx->alg;
- qreq.dir = rctx->dir;
- qreq.mode = rctx->mode;
- qreq.enckey = cipher_ctx->enc_key;
- qreq.encklen = cipher_ctx->enc_key_len;
- qreq.iv = req->info;
- qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
- qreq.cryptlen = req->nbytes;
- qreq.use_pmem = 0;
- qreq.flags = cipher_ctx->flags;
- if ((cipher_ctx->enc_key_len == 0) &&
- (pengine->pcp->platform_support.hw_key_support == 0))
- ret = -EINVAL;
- else
- ret = qce_ablk_cipher_req(pengine->qce, &qreq);
- return ret;
- }
- static int _qcrypto_process_ahash(struct crypto_engine *pengine,
- struct qcrypto_req_control *pqcrypto_req_control)
- {
- struct crypto_async_request *async_req;
- struct ahash_request *req;
- struct qce_sha_req sreq;
- struct qcrypto_sha_req_ctx *rctx;
- struct qcrypto_sha_ctx *sha_ctx;
- int ret = 0;
- async_req = pqcrypto_req_control->req;
- req = container_of(async_req,
- struct ahash_request, base);
- rctx = ahash_request_ctx(req);
- sha_ctx = crypto_tfm_ctx(async_req->tfm);
- rctx->pengine = pengine;
- sreq.qce_cb = _qce_ahash_complete;
- sreq.digest = &rctx->digest[0];
- sreq.src = req->src;
- sreq.auth_data[0] = rctx->byte_count[0];
- sreq.auth_data[1] = rctx->byte_count[1];
- sreq.auth_data[2] = rctx->byte_count[2];
- sreq.auth_data[3] = rctx->byte_count[3];
- sreq.first_blk = rctx->first_blk;
- sreq.last_blk = rctx->last_blk;
- sreq.size = req->nbytes;
- sreq.areq = req;
- sreq.flags = sha_ctx->flags;
- switch (sha_ctx->alg) {
- case QCE_HASH_SHA1:
- sreq.alg = QCE_HASH_SHA1;
- sreq.authkey = NULL;
- break;
- case QCE_HASH_SHA256:
- sreq.alg = QCE_HASH_SHA256;
- sreq.authkey = NULL;
- break;
- case QCE_HASH_SHA1_HMAC:
- sreq.alg = QCE_HASH_SHA1_HMAC;
- sreq.authkey = &sha_ctx->authkey[0];
- sreq.authklen = SHA_HMAC_KEY_SIZE;
- break;
- case QCE_HASH_SHA256_HMAC:
- sreq.alg = QCE_HASH_SHA256_HMAC;
- sreq.authkey = &sha_ctx->authkey[0];
- sreq.authklen = SHA_HMAC_KEY_SIZE;
- break;
- default:
- pr_err("Algorithm %d not supported, exiting\n", sha_ctx->alg);
- return -EINVAL;
- };
- ret = qce_process_sha_req(pengine->qce, &sreq);
- return ret;
- }
- static int _qcrypto_process_aead(struct crypto_engine *pengine,
- struct qcrypto_req_control *pqcrypto_req_control)
- {
- struct crypto_async_request *async_req;
- struct qce_req qreq;
- int ret = 0;
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *cipher_ctx;
- struct aead_request *req;
- struct crypto_aead *aead;
- async_req = pqcrypto_req_control->req;
- req = container_of(async_req, struct aead_request, base);
- aead = crypto_aead_reqtfm(req);
- rctx = aead_request_ctx(req);
- rctx->pengine = pengine;
- cipher_ctx = crypto_tfm_ctx(async_req->tfm);
- qreq.op = QCE_REQ_AEAD;
- qreq.qce_cb = _qce_aead_complete;
- qreq.areq = req;
- qreq.alg = rctx->alg;
- qreq.dir = rctx->dir;
- qreq.mode = rctx->mode;
- qreq.iv = rctx->iv;
- qreq.enckey = cipher_ctx->enc_key;
- qreq.encklen = cipher_ctx->enc_key_len;
- qreq.authkey = cipher_ctx->auth_key;
- qreq.authklen = cipher_ctx->auth_key_len;
- qreq.authsize = crypto_aead_authsize(aead);
- qreq.auth_alg = cipher_ctx->auth_alg;
- if (qreq.mode == QCE_MODE_CCM)
- qreq.ivsize = AES_BLOCK_SIZE;
- else
- qreq.ivsize = crypto_aead_ivsize(aead);
- qreq.flags = cipher_ctx->flags;
- if (qreq.mode == QCE_MODE_CCM) {
- uint32_t assoclen;
- if (qreq.dir == QCE_ENCRYPT)
- qreq.cryptlen = req->cryptlen;
- else
- qreq.cryptlen = req->cryptlen -
- qreq.authsize;
- /* if rfc4309 ccm, adjust assoclen */
- assoclen = req->assoclen;
- if (rctx->ccmtype)
- assoclen -= 8;
- /* Get NONCE */
- ret = qccrypto_set_aead_ccm_nonce(&qreq, assoclen);
- if (ret)
- return ret;
- if (assoclen) {
- rctx->adata = kzalloc((assoclen + 0x64),
- GFP_ATOMIC);
- if (!rctx->adata)
- return -ENOMEM;
- /* Format Associated data */
- ret = qcrypto_aead_ccm_format_adata(&qreq,
- assoclen,
- req->src,
- rctx->adata);
- } else {
- qreq.assoclen = 0;
- rctx->adata = NULL;
- }
- if (ret) {
- kzfree(rctx->adata);
- return ret;
- }
- /*
- * update req with new formatted associated
- * data info
- */
- qreq.asg = &rctx->asg;
- if (rctx->adata)
- sg_set_buf(qreq.asg, rctx->adata,
- qreq.assoclen);
- sg_mark_end(qreq.asg);
- }
- ret = qce_aead_req(pengine->qce, &qreq);
- return ret;
- }
- static struct crypto_engine *_qcrypto_static_assign_engine(
- struct crypto_priv *cp)
- {
- struct crypto_engine *pengine;
- unsigned long flags;
- spin_lock_irqsave(&cp->lock, flags);
- if (cp->next_engine)
- pengine = cp->next_engine;
- else
- pengine = list_first_entry(&cp->engine_list,
- struct crypto_engine, elist);
- if (list_is_last(&pengine->elist, &cp->engine_list))
- cp->next_engine = list_first_entry(
- &cp->engine_list, struct crypto_engine, elist);
- else
- cp->next_engine = list_next_entry(pengine, elist);
- spin_unlock_irqrestore(&cp->lock, flags);
- return pengine;
- }
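- /*
- * Pull the next request, preferring the engine's own queue over the
- * driver-wide queue, allocate a request control block, link the
- * response entry onto the tfm's rsp_queue, and dispatch the request to
- * the QCE hardware. On failure the error is completed immediately and
- * the next request is attempted.
- */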
- static int _start_qcrypto_process(struct crypto_priv *cp,
- struct crypto_engine *pengine)
- {
- struct crypto_async_request *async_req = NULL;
- struct crypto_async_request *backlog_eng = NULL;
- struct crypto_async_request *backlog_cp = NULL;
- unsigned long flags;
- u32 type;
- int ret = 0;
- struct crypto_stat *pstat;
- void *tfm_ctx;
- struct qcrypto_cipher_req_ctx *cipher_rctx;
- struct qcrypto_sha_req_ctx *ahash_rctx;
- struct ablkcipher_request *ablkcipher_req;
- struct ahash_request *ahash_req;
- struct aead_request *aead_req;
- struct qcrypto_resp_ctx *arsp;
- struct qcrypto_req_control *pqcrypto_req_control;
- unsigned int cpu = MAX_SMP_CPU;
- if (READ_ONCE(cp->ce_req_proc_sts) == STOPPED)
- return 0;
- if (in_interrupt()) {
- cpu = smp_processor_id();
- if (cpu >= MAX_SMP_CPU)
- cpu = MAX_SMP_CPU - 1;
- } else
- cpu = MAX_SMP_CPU;
- pstat = &_qcrypto_stat;
- again:
- spin_lock_irqsave(&cp->lock, flags);
- if (pengine->issue_req ||
- atomic_read(&pengine->req_count) >= (pengine->max_req)) {
- spin_unlock_irqrestore(&cp->lock, flags);
- return 0;
- }
- backlog_eng = crypto_get_backlog(&pengine->req_queue);
- /* make sure it is in high bandwidth state */
- if (pengine->bw_state != BUS_HAS_BANDWIDTH) {
- spin_unlock_irqrestore(&cp->lock, flags);
- return 0;
- }
- /* try to get request from request queue of the engine first */
- async_req = crypto_dequeue_request(&pengine->req_queue);
- if (!async_req) {
- /*
- * if the engine queue is empty, try the
- * driver-wide request queue
- */
- backlog_cp = crypto_get_backlog(&cp->req_queue);
- async_req = crypto_dequeue_request(&cp->req_queue);
- if (!async_req) {
- spin_unlock_irqrestore(&cp->lock, flags);
- return 0;
- }
- }
- pqcrypto_req_control = qcrypto_alloc_req_control(pengine);
- if (pqcrypto_req_control == NULL) {
- pr_err("Allocation of request failed\n");
- spin_unlock_irqrestore(&cp->lock, flags);
- return 0;
- }
- /* add associated rsp entry to tfm response queue */
- type = crypto_tfm_alg_type(async_req->tfm);
- tfm_ctx = crypto_tfm_ctx(async_req->tfm);
- switch (type) {
- case CRYPTO_ALG_TYPE_AHASH:
- ahash_req = container_of(async_req,
- struct ahash_request, base);
- ahash_rctx = ahash_request_ctx(ahash_req);
- arsp = &ahash_rctx->rsp_entry;
- list_add_tail(
- &arsp->list,
- &((struct qcrypto_sha_ctx *)tfm_ctx)
- ->rsp_queue);
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- ablkcipher_req = container_of(async_req,
- struct ablkcipher_request, base);
- cipher_rctx = ablkcipher_request_ctx(ablkcipher_req);
- arsp = &cipher_rctx->rsp_entry;
- list_add_tail(
- &arsp->list,
- &((struct qcrypto_cipher_ctx *)tfm_ctx)
- ->rsp_queue);
- break;
- case CRYPTO_ALG_TYPE_AEAD:
- default:
- aead_req = container_of(async_req,
- struct aead_request, base);
- cipher_rctx = aead_request_ctx(aead_req);
- arsp = &cipher_rctx->rsp_entry;
- list_add_tail(
- &arsp->list,
- &((struct qcrypto_cipher_ctx *)tfm_ctx)
- ->rsp_queue);
- break;
- }
- arsp->res = -EINPROGRESS;
- arsp->async_req = async_req;
- pqcrypto_req_control->pce = pengine;
- pqcrypto_req_control->req = async_req;
- pqcrypto_req_control->arsp = arsp;
- pengine->active_seq++;
- pengine->check_flag = true;
- pengine->issue_req = true;
- cp->cpu_req[cpu]++;
- smp_mb(); /* make it visible */
- spin_unlock_irqrestore(&cp->lock, flags);
- if (backlog_eng)
- backlog_eng->complete(backlog_eng, -EINPROGRESS);
- if (backlog_cp)
- backlog_cp->complete(backlog_cp, -EINPROGRESS);
- switch (type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- ret = _qcrypto_process_ablkcipher(pengine,
- pqcrypto_req_control);
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
- break;
- case CRYPTO_ALG_TYPE_AEAD:
- ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
- break;
- default:
- ret = -EINVAL;
- };
- pengine->issue_req = false;
- smp_mb(); /* make it visible */
- pengine->total_req++;
- if (ret) {
- pengine->err_req++;
- qcrypto_free_req_control(pengine, pqcrypto_req_control);
- if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
- pstat->ablk_cipher_op_fail++;
- else
- if (type == CRYPTO_ALG_TYPE_AHASH)
- pstat->ahash_op_fail++;
- else
- pstat->aead_op_fail++;
- _qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, ret);
- goto again;
- };
- return ret;
- }
- static inline struct crypto_engine *_next_eng(struct crypto_priv *cp,
- struct crypto_engine *p)
- {
- if (p == NULL || list_is_last(&p->elist, &cp->engine_list))
- p = list_first_entry(&cp->engine_list, struct crypto_engine,
- elist);
- else
- p = list_entry(p->elist.next, struct crypto_engine, elist);
- return p;
- }
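- /*
- * Round-robin engine selection: starting after the last scheduled
- * engine, pick the first engine that is neither issuing a request nor
- * already at its request limit.
- */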
- static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
- {
- /* must be called with cp->lock held */
- struct crypto_engine *q = NULL;
- struct crypto_engine *p = cp->scheduled_eng;
- struct crypto_engine *q1;
- int eng_cnt = cp->total_units;
- if (unlikely(list_empty(&cp->engine_list))) {
- pr_err("%s: no valid ce to schedule\n", __func__);
- return NULL;
- }
- p = _next_eng(cp, p);
- q1 = p;
- while (eng_cnt-- > 0) {
- if (!p->issue_req && atomic_read(&p->req_count) < p->max_req) {
- q = p;
- break;
- }
- p = _next_eng(cp, p);
- if (q1 == p)
- break;
- }
- cp->scheduled_eng = q;
- return q;
- }
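- /*
- * Queue a request on a specific engine (if one was given) or on the
- * driver-wide queue, then kick processing only when the chosen engine
- * already has bus bandwidth; otherwise raise a bandwidth request and
- * defer the dispatch.
- */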
- static int _qcrypto_queue_req(struct crypto_priv *cp,
- struct crypto_engine *pengine,
- struct crypto_async_request *req)
- {
- int ret;
- unsigned long flags;
- spin_lock_irqsave(&cp->lock, flags);
- if (pengine) {
- ret = crypto_enqueue_request(&pengine->req_queue, req);
- } else {
- ret = crypto_enqueue_request(&cp->req_queue, req);
- pengine = _avail_eng(cp);
- if (cp->req_queue.qlen > cp->max_qlen)
- cp->max_qlen = cp->req_queue.qlen;
- }
- if (pengine) {
- switch (pengine->bw_state) {
- case BUS_NO_BANDWIDTH:
- if (pengine->high_bw_req == false) {
- qcrypto_ce_bw_allocate_req(pengine);
- pengine->high_bw_req = true;
- }
- pengine = NULL;
- break;
- case BUS_HAS_BANDWIDTH:
- break;
- case BUS_BANDWIDTH_RELEASING:
- pengine->high_bw_req = true;
- pengine = NULL;
- break;
- case BUS_BANDWIDTH_ALLOCATING:
- pengine = NULL;
- break;
- case BUS_SUSPENDED:
- case BUS_SUSPENDING:
- default:
- pengine = NULL;
- break;
- }
- } else {
- cp->no_avail++;
- }
- spin_unlock_irqrestore(&cp->lock, flags);
- if (pengine && (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS))
- _start_qcrypto_process(cp, pengine);
- return ret;
- }
- static int _qcrypto_enc_aes_192_fallback(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int err;
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
- skcipher_request_set_tfm(subreq, ctx->cipher_aes192_fb);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
- }
- static int _qcrypto_dec_aes_192_fallback(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int err;
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
- skcipher_request_set_tfm(subreq, ctx->cipher_aes192_fb);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
- }
- static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- #ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %pK\n", req);
- #endif
- if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
- (!cp->ce_support.aes_key_192) &&
- ctx->cipher_aes192_fb)
- return _qcrypto_enc_aes_192_fallback(req);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_ECB;
- pstat->ablk_cipher_aes_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- #ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %pK\n", req);
- #endif
- if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
- (!cp->ce_support.aes_key_192) &&
- ctx->cipher_aes192_fb)
- return _qcrypto_enc_aes_192_fallback(req);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- pstat->ablk_cipher_aes_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- #ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %pK\n", req);
- #endif
- if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
- (!cp->ce_support.aes_key_192) &&
- ctx->cipher_aes192_fb)
- return _qcrypto_enc_aes_192_fallback(req);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CTR;
- pstat->ablk_cipher_aes_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_XTS;
- pstat->ablk_cipher_aes_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
- return -EINVAL;
- if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
- (ctx->auth_key_len != AES_KEYSIZE_256))
- return -EINVAL;
- pstat = &_qcrypto_stat;
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CCM;
- rctx->iv = req->iv;
- rctx->ccmtype = 0;
- pstat->aead_ccm_aes_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
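- /*
- * RFC 4309 (CCM for ESP) encrypt entry point: the CCM IV is assembled
- * from a leading L - 1 byte (3), the 3-byte salt captured at setkey
- * time, and the 8-byte per-request IV.
- */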
- static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- if (req->assoclen != 16 && req->assoclen != 20)
- return -EINVAL;
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CCM;
- memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
- rctx->rfc4309_iv[0] = 3; /* L -1 */
- memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
- memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
- rctx->ccmtype = 1;
- rctx->iv = rctx->rfc4309_iv;
- pstat->aead_rfc4309_ccm_aes_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
- static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_DES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_ECB;
- pstat->ablk_cipher_des_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_DES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- pstat->ablk_cipher_des_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_3DES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_ECB;
- pstat->ablk_cipher_3des_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_3DES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- pstat->ablk_cipher_3des_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- #ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %pK\n", req);
- #endif
- if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
- (!cp->ce_support.aes_key_192) &&
- ctx->cipher_aes192_fb)
- return _qcrypto_dec_aes_192_fallback(req);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_ECB;
- pstat->ablk_cipher_aes_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- #ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %pK\n", req);
- #endif
- if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
- (!cp->ce_support.aes_key_192) &&
- ctx->cipher_aes192_fb)
- return _qcrypto_dec_aes_192_fallback(req);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_CBC;
- pstat->ablk_cipher_aes_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- #ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %pK\n", req);
- #endif
- if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
- (!cp->ce_support.aes_key_192) &&
- ctx->cipher_aes192_fb)
- return _qcrypto_dec_aes_192_fallback(req);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_AES;
- rctx->mode = QCE_MODE_CTR;
- /* Note: CTR mode has no separate decrypt operation; use the encrypt direction */
- rctx->dir = QCE_ENCRYPT;
- pstat->ablk_cipher_aes_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_DES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_ECB;
- pstat->ablk_cipher_des_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_DES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_CBC;
- pstat->ablk_cipher_des_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_3DES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_ECB;
- pstat->ablk_cipher_3des_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_3DES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_CBC;
- pstat->ablk_cipher_3des_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
- CRYPTO_ALG_TYPE_ABLKCIPHER);
- rctx = ablkcipher_request_ctx(req);
- rctx->aead = 0;
- rctx->alg = CIPHER_ALG_AES;
- rctx->mode = QCE_MODE_XTS;
- rctx->dir = QCE_DECRYPT;
- pstat->ablk_cipher_aes_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- };
- static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
- return -EINVAL;
- if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
- (ctx->auth_key_len != AES_KEYSIZE_256))
- return -EINVAL;
- pstat = &_qcrypto_stat;
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_CCM;
- rctx->iv = req->iv;
- rctx->ccmtype = 0;
- pstat->aead_ccm_aes_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
- static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- if (req->assoclen != 16 && req->assoclen != 20)
- return -EINVAL;
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_CCM;
- memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
- rctx->rfc4309_iv[0] = 3; /* L -1 */
- memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
- memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
- rctx->ccmtype = 1;
- rctx->iv = rctx->rfc4309_iv;
- pstat->aead_rfc4309_ccm_aes_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
- static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
- ctx->authsize = authsize;
- return 0;
- }
- static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
- switch (authsize) {
- case 4:
- case 6:
- case 8:
- case 10:
- case 12:
- case 14:
- case 16:
- break;
- default:
- return -EINVAL;
- }
- ctx->authsize = authsize;
- return 0;
- }
- static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
- switch (authsize) {
- case 8:
- case 12:
- case 16:
- break;
- default:
- return -EINVAL;
- }
- ctx->authsize = authsize;
- return 0;
- }
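- /*
- * authenc-style AEAD setkey: the key blob carries an rtattr with the
- * encryption key length, followed by the authentication key and then
- * the encryption key. AES-192 keys are additionally programmed into
- * the software fallback skcipher/ahash tfms.
- */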
- static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
- struct crypto_authenc_key_param *param;
- int ret;
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
- param = RTA_DATA(rta);
- ctx->enc_key_len = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < ctx->enc_key_len)
- goto badkey;
- ctx->auth_key_len = keylen - ctx->enc_key_len;
- if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
- ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
- goto badkey;
- memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
- memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
- memcpy(ctx->auth_key, key, ctx->auth_key_len);
- if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
- ctx->ahash_aead_aes192_fb) {
- crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0);
- ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb,
- ctx->auth_key, ctx->auth_key_len);
- if (ret)
- goto badkey;
- crypto_skcipher_clear_flags(ctx->cipher_aes192_fb, ~0);
- ret = crypto_skcipher_setkey(ctx->cipher_aes192_fb,
- ctx->enc_key, ctx->enc_key_len);
- if (ret)
- goto badkey;
- }
- return 0;
- badkey:
- ctx->enc_key_len = 0;
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen)
- {
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_priv *cp = ctx->cp;
- switch (keylen) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_256:
- break;
- case AES_KEYSIZE_192:
- if (cp->ce_support.aes_key_192)
- break;
- default:
- ctx->enc_key_len = 0;
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- };
- ctx->enc_key_len = keylen;
- memcpy(ctx->enc_key, key, keylen);
- ctx->auth_key_len = keylen;
- memcpy(ctx->auth_key, key, keylen);
- return 0;
- }
- static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
- const u8 *key, unsigned int key_len)
- {
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret;
- if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
- return -EINVAL;
- key_len -= QCRYPTO_CCM4309_NONCE_LEN;
- memcpy(ctx->ccm4309_nonce, key + key_len, QCRYPTO_CCM4309_NONCE_LEN);
- ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
- return ret;
- };
- static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx,
- int res)
- {
- struct aead_request *req;
- struct crypto_async_request *areq;
- req = rctx->aead_req;
- areq = &req->base;
- if (rctx->fb_aes_req)
- skcipher_request_free(rctx->fb_aes_req);
- if (rctx->fb_hash_req)
- ahash_request_free(rctx->fb_hash_req);
- rctx->fb_aes_req = NULL;
- rctx->fb_hash_req = NULL;
- kfree(rctx->fb_aes_iv);
- areq->complete(areq, res);
- }
- static void _aead_aes_fb_stage2_ahash_complete(
- struct crypto_async_request *base, int err)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct aead_request *req;
- struct qcrypto_cipher_ctx *ctx;
- rctx = base->data;
- req = rctx->aead_req;
- ctx = crypto_tfm_ctx(req->base.tfm);
- /* copy icv */
- if (err == 0)
- scatterwalk_map_and_copy(rctx->fb_ahash_digest,
- rctx->fb_aes_dst,
- req->cryptlen,
- ctx->authsize, 1);
- _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
- }
- static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx)
- {
- struct ahash_request *ahash_req;
- ahash_req = rctx->fb_hash_req;
- ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- _aead_aes_fb_stage2_ahash_complete, rctx);
- return crypto_ahash_digest(ahash_req);
- }
- static void _aead_aes_fb_stage2_decrypt_complete(
- struct crypto_async_request *base, int err)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- rctx = base->data;
- _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
- }
- static int _start_aead_aes_fb_stage2_decrypt(
- struct qcrypto_cipher_req_ctx *rctx)
- {
- struct skcipher_request *aes_req;
- aes_req = rctx->fb_aes_req;
- skcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- _aead_aes_fb_stage2_decrypt_complete, rctx);
- return crypto_skcipher_decrypt(aes_req);
- }
- static void _aead_aes_fb_stage1_ahash_complete(
- struct crypto_async_request *base, int err)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct aead_request *req;
- struct qcrypto_cipher_ctx *ctx;
- rctx = base->data;
- req = rctx->aead_req;
- ctx = crypto_tfm_ctx(req->base.tfm);
- /* compare icv */
- if (err == 0) {
- unsigned char tmp[ctx->authsize];
- scatterwalk_map_and_copy(tmp, rctx->fb_aes_src,
- req->cryptlen - ctx->authsize, ctx->authsize, 0);
- if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
- err = -EBADMSG;
- }
- if (err)
- _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
- else {
- err = _start_aead_aes_fb_stage2_decrypt(rctx);
- if (err != -EINPROGRESS && err != -EBUSY)
- _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
- }
- }
- static void _aead_aes_fb_stage1_encrypt_complete(
- struct crypto_async_request *base, int err)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct aead_request *req;
- struct qcrypto_cipher_ctx *ctx;
- rctx = base->data;
- req = rctx->aead_req;
- ctx = crypto_tfm_ctx(req->base.tfm);
- memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
- if (err) {
- _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
- return;
- }
- err = _start_aead_aes_fb_stage2_hmac(rctx);
- /* copy icv */
- if (err == 0) {
- scatterwalk_map_and_copy(rctx->fb_ahash_digest,
- rctx->fb_aes_dst,
- req->cryptlen,
- ctx->authsize, 1);
- }
- if (err != -EINPROGRESS && err != -EBUSY)
- _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
- }
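- /*
- * Software fallback for AES-192 CBC AEAD when the engine lacks 192-bit
- * key support, built from the fallback skcipher and ahash tfms.
- * Encryption runs the cipher first and then hashes the result to
- * produce the ICV; decryption verifies the ICV first and then runs the
- * cipher. When a stage-1 operation completes asynchronously, stage 2
- * is chained from its completion callback.
- */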
- static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
- bool is_encrypt)
- {
- int rc = -EINVAL;
- struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req);
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req);
- struct skcipher_request *aes_req = NULL;
- struct ahash_request *ahash_req = NULL;
- int nbytes;
- struct scatterlist *src, *dst;
- rctx->fb_aes_iv = NULL;
- aes_req = skcipher_request_alloc(ctx->cipher_aes192_fb, GFP_KERNEL);
- if (!aes_req)
- return -ENOMEM;
- ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL);
- if (!ahash_req)
- goto ret;
- rctx->fb_aes_req = aes_req;
- rctx->fb_hash_req = ahash_req;
- rctx->aead_req = req;
- /* assoc data and iv sit at the beginning of the src sg list */
- /* similarly, assoc data and iv sit at the beginning of the dst sg list */
- src = scatterwalk_ffwd(rctx->fb_ablkcipher_src_sg, req->src,
- req->assoclen);
- dst = scatterwalk_ffwd(rctx->fb_ablkcipher_dst_sg, req->dst,
- req->assoclen);
- nbytes = req->cryptlen;
- if (!is_encrypt)
- nbytes -= ctx->authsize;
- rctx->fb_ahash_length = nbytes + req->assoclen;
- rctx->fb_aes_src = src;
- rctx->fb_aes_dst = dst;
- rctx->fb_aes_cryptlen = nbytes;
- rctx->ivsize = crypto_aead_ivsize(aead_tfm);
- rctx->fb_aes_iv = kzalloc(rctx->ivsize, GFP_ATOMIC);
- if (!rctx->fb_aes_iv)
- goto ret;
- memcpy(rctx->fb_aes_iv, req->iv, rctx->ivsize);
- skcipher_request_set_crypt(aes_req, rctx->fb_aes_src,
- rctx->fb_aes_dst,
- rctx->fb_aes_cryptlen, rctx->fb_aes_iv);
- if (is_encrypt)
- ahash_request_set_crypt(ahash_req, req->dst,
- rctx->fb_ahash_digest,
- rctx->fb_ahash_length);
- else
- ahash_request_set_crypt(ahash_req, req->src,
- rctx->fb_ahash_digest,
- rctx->fb_ahash_length);
- if (is_encrypt) {
- skcipher_request_set_callback(aes_req,
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- _aead_aes_fb_stage1_encrypt_complete, rctx);
- rc = crypto_skcipher_encrypt(aes_req);
- if (rc == 0) {
- memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
- rc = _start_aead_aes_fb_stage2_hmac(rctx);
- if (rc == 0) {
- /* copy icv */
- scatterwalk_map_and_copy(rctx->fb_ahash_digest,
- dst,
- req->cryptlen,
- ctx->authsize, 1);
- }
- }
- if (rc == -EINPROGRESS || rc == -EBUSY)
- return rc;
- goto ret;
- } else {
- ahash_request_set_callback(ahash_req,
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- _aead_aes_fb_stage1_ahash_complete, rctx);
- rc = crypto_ahash_digest(ahash_req);
- if (rc == 0) {
- unsigned char tmp[ctx->authsize];
- /* compare icv */
- scatterwalk_map_and_copy(tmp,
- src, req->cryptlen - ctx->authsize,
- ctx->authsize, 0);
- if (memcmp(rctx->fb_ahash_digest, tmp,
- ctx->authsize) != 0)
- rc = -EBADMSG;
- else
- rc = _start_aead_aes_fb_stage2_decrypt(rctx);
- }
- if (rc == -EINPROGRESS || rc == -EBUSY)
- return rc;
- goto ret;
- }
- ret:
- if (aes_req)
- skcipher_request_free(aes_req);
- if (ahash_req)
- ahash_request_free(ahash_req);
- kfree(rctx->fb_aes_iv);
- return rc;
- }
- static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- #ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev,
- "_qcrypto_aead_encrypt_aes_cbc: %pK\n", req);
- #endif
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->iv;
- rctx->aead_req = req;
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_aes_enc++;
- else
- pstat->aead_sha256_aes_enc++;
- if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
- ctx->ahash_aead_aes192_fb)
- return _qcrypto_aead_aes_192_fallback(req, true);
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
- static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- #ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev,
- "_qcrypto_aead_decrypt_aes_cbc: %pK\n", req);
- #endif
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_AES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->iv;
- rctx->aead_req = req;
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_aes_dec++;
- else
- pstat->aead_sha256_aes_dec++;
- if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
- ctx->ahash_aead_aes192_fb)
- return _qcrypto_aead_aes_192_fallback(req, false);
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
- static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_DES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->iv;
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_des_enc++;
- else
- pstat->aead_sha256_des_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
- static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_DES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->iv;
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_des_dec++;
- else
- pstat->aead_sha256_des_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
- static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_3DES;
- rctx->dir = QCE_ENCRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->iv;
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_3des_enc++;
- else
- pstat->aead_sha256_3des_enc++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
- static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
- {
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_stat *pstat;
- pstat = &_qcrypto_stat;
- rctx = aead_request_ctx(req);
- rctx->aead = 1;
- rctx->alg = CIPHER_ALG_3DES;
- rctx->dir = QCE_DECRYPT;
- rctx->mode = QCE_MODE_CBC;
- rctx->iv = req->iv;
- if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
- pstat->aead_sha1_3des_dec++;
- else
- pstat->aead_sha256_3des_dec++;
- return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
- }
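- /* Reset the per-request hash state: counters and trailing buffer. */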
- static int _sha_init(struct ahash_request *req)
- {
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- rctx->first_blk = 1;
- rctx->last_blk = 0;
- rctx->byte_count[0] = 0;
- rctx->byte_count[1] = 0;
- rctx->byte_count[2] = 0;
- rctx->byte_count[3] = 0;
- rctx->trailing_buf_len = 0;
- rctx->count = 0;
- return 0;
- };
- static int _sha1_init(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_stat *pstat;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- pstat = &_qcrypto_stat;
- _sha_init(req);
- sha_ctx->alg = QCE_HASH_SHA1;
- memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
- memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
- SHA1_DIGEST_SIZE);
- sha_ctx->diglen = SHA1_DIGEST_SIZE;
- pstat->sha1_digest++;
- return 0;
- };
- static int _sha256_init(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_stat *pstat;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- pstat = &_qcrypto_stat;
- _sha_init(req);
- sha_ctx->alg = QCE_HASH_SHA256;
- memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
- memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
- SHA256_DIGEST_SIZE);
- sha_ctx->diglen = SHA256_DIGEST_SIZE;
- pstat->sha256_digest++;
- return 0;
- };
- static int _sha1_export(struct ahash_request *req, void *out)
- {
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha1_state *out_ctx = (struct sha1_state *)out;
- out_ctx->count = rctx->count;
- _byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE);
- memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE);
- return 0;
- };
- static int _sha1_hmac_export(struct ahash_request *req, void *out)
- {
- return _sha1_export(req, out);
- }
- /* crypto hw padding constant for hmac first operation */
- #define HMAC_PADDING 64
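- /*
- * Import a partial SHA-1 state produced by _sha1_export(). byte_count
- * reflects what the hardware has already consumed; for HMAC the extra
- * block padded in by hardware on the first operation is added back.
- */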
- static int __sha1_import_common(struct ahash_request *req, const void *in,
- bool hmac)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha1_state *in_ctx = (struct sha1_state *)in;
- u64 hw_count = in_ctx->count;
- rctx->count = in_ctx->count;
- memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
- if (in_ctx->count <= SHA1_BLOCK_SIZE) {
- rctx->first_blk = 1;
- } else {
- rctx->first_blk = 0;
- /*
- * For hmac, hardware padding is done when first
- * is set, so the byte_count is incremented by 64
- * after the first-block operation.
- */
- if (hmac)
- hw_count += HMAC_PADDING;
- }
- rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0);
- rctx->byte_count[1] = (uint32_t)(hw_count >> 32);
- _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
- rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
- (SHA1_BLOCK_SIZE-1));
- return 0;
- }
- static int _sha1_import(struct ahash_request *req, const void *in)
- {
- return __sha1_import_common(req, in, false);
- }
- static int _sha1_hmac_import(struct ahash_request *req, const void *in)
- {
- return __sha1_import_common(req, in, true);
- }
- static int _sha256_export(struct ahash_request *req, void *out)
- {
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha256_state *out_ctx = (struct sha256_state *)out;
- out_ctx->count = rctx->count;
- _byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE);
- memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE);
- return 0;
- };
- static int _sha256_hmac_export(struct ahash_request *req, void *out)
- {
- return _sha256_export(req, out);
- }
- static int __sha256_import_common(struct ahash_request *req, const void *in,
- bool hmac)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sha256_state *in_ctx = (struct sha256_state *)in;
- u64 hw_count = in_ctx->count;
- rctx->count = in_ctx->count;
- memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
- if (in_ctx->count <= SHA256_BLOCK_SIZE) {
- rctx->first_blk = 1;
- } else {
- rctx->first_blk = 0;
- /*
- * For hmac, hardware padding is done when first
- * is set, so the byte_count is incremented by 64
- * after the first-block operation.
- */
- if (hmac)
- hw_count += HMAC_PADDING;
- }
- rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0);
- rctx->byte_count[1] = (uint32_t)(hw_count >> 32);
- _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
- rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
- (SHA256_BLOCK_SIZE-1));
- return 0;
- }
- static int _sha256_import(struct ahash_request *req, const void *in)
- {
- return __sha256_import_common(req, in, false);
- }
- static int _sha256_hmac_import(struct ahash_request *req, const void *in)
- {
- return __sha256_import_common(req, in, true);
- }
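- /*
- * Bounce the source scatterlist into one contiguous buffer for
- * engines that only accept aligned data (ce_support.aligned_only).
- */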
- static int _copy_source(struct ahash_request *req)
- {
- struct qcrypto_sha_req_ctx *srctx = NULL;
- uint32_t bytes = 0;
- uint32_t num_sg = 0;
- srctx = ahash_request_ctx(req);
- srctx->orig_src = req->src;
- srctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
- if (srctx->data == NULL) {
- pr_err("Mem Alloc fail rctx->data, err %ld for 0x%x\n",
- PTR_ERR(srctx->data), (req->nbytes + 64));
- return -ENOMEM;
- }
- num_sg = qcrypto_count_sg(req->src, req->nbytes);
- bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, srctx->data,
- req->nbytes);
- if (bytes != req->nbytes)
- pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
- req->nbytes);
- sg_set_buf(&srctx->dsg, srctx->data,
- req->nbytes);
- sg_mark_end(&srctx->dsg);
- req->src = &srctx->dsg;
- return 0;
- }
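- /*
- * Common update path: whole blocks are queued to the engine while any
- * residue shorter than one block is kept in trailing_buf and prepended
- * to the next update (or hashed by the final operation).
- */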
- static int _sha_update(struct ahash_request *req, uint32_t sha_block_size)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = sha_ctx->cp;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- uint32_t total, len, num_sg;
- struct scatterlist *sg_last;
- uint8_t *k_src = NULL;
- uint32_t sha_pad_len = 0;
- uint32_t trailing_buf_len = 0;
- uint32_t nbytes;
- uint32_t offset = 0;
- uint32_t bytes = 0;
- uint8_t *staging;
- int ret = 0;
- /* check for trailing buffer from previous updates and append it */
- total = req->nbytes + rctx->trailing_buf_len;
- len = req->nbytes;
- if (total <= sha_block_size) {
- k_src = &rctx->trailing_buf[rctx->trailing_buf_len];
- num_sg = qcrypto_count_sg(req->src, len);
- bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len);
- rctx->trailing_buf_len = total;
- return 0;
- }
- /* save the original req structure fields */
- rctx->src = req->src;
- rctx->nbytes = req->nbytes;
- staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
- L1_CACHE_BYTES);
- memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
- k_src = &rctx->trailing_buf[0];
- /* get new trailing buffer */
- sha_pad_len = ALIGN(total, sha_block_size) - total;
- trailing_buf_len = sha_block_size - sha_pad_len;
- offset = req->nbytes - trailing_buf_len;
- if (offset != req->nbytes)
- scatterwalk_map_and_copy(k_src, req->src, offset,
- trailing_buf_len, 0);
- nbytes = total - trailing_buf_len;
- num_sg = qcrypto_count_sg(req->src, req->nbytes);
- len = rctx->trailing_buf_len;
- sg_last = req->src;
- while (len < nbytes) {
- if ((len + sg_last->length) > nbytes)
- break;
- len += sg_last->length;
- sg_last = sg_next(sg_last);
- }
- if (rctx->trailing_buf_len) {
- if (cp->ce_support.aligned_only) {
- rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC);
- if (rctx->data2 == NULL) {
- pr_err("Mem Alloc fail srctx->data2, err %ld\n",
- PTR_ERR(rctx->data2));
- return -ENOMEM;
- }
- memcpy(rctx->data2, staging,
- rctx->trailing_buf_len);
- memcpy((rctx->data2 + rctx->trailing_buf_len),
- rctx->data, req->src->length);
- kzfree(rctx->data);
- rctx->data = rctx->data2;
- sg_set_buf(&rctx->sg[0], rctx->data,
- (rctx->trailing_buf_len +
- req->src->length));
- req->src = rctx->sg;
- sg_mark_end(&rctx->sg[0]);
- } else {
- sg_mark_end(sg_last);
- memset(rctx->sg, 0, sizeof(rctx->sg));
- sg_set_buf(&rctx->sg[0], staging,
- rctx->trailing_buf_len);
- sg_mark_end(&rctx->sg[1]);
- sg_chain(rctx->sg, 2, req->src);
- req->src = rctx->sg;
- }
- } else
- sg_mark_end(sg_last);
- req->nbytes = nbytes;
- rctx->trailing_buf_len = trailing_buf_len;
- ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
- return ret;
- };
- static int _sha1_update(struct ahash_request *req)
- {
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = sha_ctx->cp;
- if (cp->ce_support.aligned_only) {
- if (_copy_source(req))
- return -ENOMEM;
- }
- rctx->count += req->nbytes;
- return _sha_update(req, SHA1_BLOCK_SIZE);
- }
- static int _sha256_update(struct ahash_request *req)
- {
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = sha_ctx->cp;
- if (cp->ce_support.aligned_only) {
- if (_copy_source(req))
- return -ENOMEM;
- }
- rctx->count += req->nbytes;
- return _sha_update(req, SHA256_BLOCK_SIZE);
- }
- static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = sha_ctx->cp;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- int ret = 0;
- uint8_t *staging;
- if (cp->ce_support.aligned_only) {
- if (_copy_source(req))
- return -ENOMEM;
- }
- rctx->last_blk = 1;
- /* save the original req structure fields */
- rctx->src = req->src;
- rctx->nbytes = req->nbytes;
- staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
- L1_CACHE_BYTES);
- memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
- sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
- sg_mark_end(&rctx->sg[0]);
- req->src = &rctx->sg[0];
- req->nbytes = rctx->trailing_buf_len;
- ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
- return ret;
- };
- static int _sha1_final(struct ahash_request *req)
- {
- return _sha_final(req, SHA1_BLOCK_SIZE);
- }
- static int _sha256_final(struct ahash_request *req)
- {
- return _sha_final(req, SHA256_BLOCK_SIZE);
- }
- static int _sha_digest(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_priv *cp = sha_ctx->cp;
- int ret = 0;
- if (cp->ce_support.aligned_only) {
- if (_copy_source(req))
- return -ENOMEM;
- }
- /* save the original req structure fields */
- rctx->src = req->src;
- rctx->nbytes = req->nbytes;
- rctx->first_blk = 1;
- rctx->last_blk = 1;
- ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
- return ret;
- }
- static int _sha1_digest(struct ahash_request *req)
- {
- _sha1_init(req);
- return _sha_digest(req);
- }
- static int _sha256_digest(struct ahash_request *req)
- {
- _sha256_init(req);
- return _sha_digest(req);
- }
- static void _crypto_sha_hmac_ahash_req_complete(
- struct crypto_async_request *req, int err)
- {
- struct completion *ahash_req_complete = req->data;
- if (err == -EINPROGRESS)
- return;
- complete(ahash_req_complete);
- }
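- /*
- * Digest an HMAC key that is longer than the block size; the resulting
- * digest, stored in sha_ctx->authkey, becomes the effective key
- * (standard HMAC key preprocessing).
- */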
- static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int len)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
- uint8_t *in_buf;
- int ret = 0;
- struct scatterlist sg = {0};
- struct ahash_request *ahash_req;
- struct completion ahash_req_complete;
- ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (ahash_req == NULL)
- return -ENOMEM;
- init_completion(&ahash_req_complete);
- ahash_request_set_callback(ahash_req,
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- _crypto_sha_hmac_ahash_req_complete,
- &ahash_req_complete);
- crypto_ahash_clear_flags(tfm, ~0);
- in_buf = kzalloc(len + 64, GFP_KERNEL);
- if (in_buf == NULL) {
- ahash_request_free(ahash_req);
- return -ENOMEM;
- }
- memcpy(in_buf, key, len);
- sg_set_buf(&sg, in_buf, len);
- sg_mark_end(&sg);
- ahash_request_set_crypt(ahash_req, &sg,
- &sha_ctx->authkey[0], len);
- if (sha_ctx->alg == QCE_HASH_SHA1)
- ret = _sha1_digest(ahash_req);
- else
- ret = _sha256_digest(ahash_req);
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- ret =
- wait_for_completion_interruptible(
- &ahash_req_complete);
- reinit_completion(&sha_ctx->ahash_req_complete);
- }
- kzfree(in_buf);
- ahash_request_free(ahash_req);
- return ret;
- }
- static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int len)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
- int ret = 0;
- memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
- if (len <= SHA1_BLOCK_SIZE) {
- memcpy(&sha_ctx->authkey[0], key, len);
- sha_ctx->authkey_in_len = len;
- } else {
- sha_ctx->alg = QCE_HASH_SHA1;
- sha_ctx->diglen = SHA1_DIGEST_SIZE;
- ret = _sha_hmac_setkey(tfm, key, len);
- if (ret)
- pr_err("SHA1 hmac setkey failed\n");
- sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
- }
- return ret;
- }
- static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int len)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
- int ret = 0;
- memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
- if (len <= SHA256_BLOCK_SIZE) {
- memcpy(&sha_ctx->authkey[0], key, len);
- sha_ctx->authkey_in_len = len;
- } else {
- sha_ctx->alg = QCE_HASH_SHA256;
- sha_ctx->diglen = SHA256_DIGEST_SIZE;
- ret = _sha_hmac_setkey(tfm, key, len);
- if (ret)
- pr_err("SHA256 hmac setkey failed\n");
- sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
- }
- return ret;
- }
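- /*
- * Software HMAC path (engine lacks sha_hmac support): seed the inner
- * hash with authkey XOR 0x36 (ipad); the matching outer hash with
- * authkey XOR 0x5c (opad) is performed in _sha_hmac_outer_hash().
- */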
- static int _sha_hmac_init_ihash(struct ahash_request *req,
- uint32_t sha_block_size)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- int i;
- for (i = 0; i < sha_block_size; i++)
- rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
- rctx->trailing_buf_len = sha_block_size;
- return 0;
- }
- static int _sha1_hmac_init(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = sha_ctx->cp;
- struct crypto_stat *pstat;
- int ret = 0;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- pstat = &_qcrypto_stat;
- pstat->sha1_hmac_digest++;
- _sha_init(req);
- memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
- memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
- SHA1_DIGEST_SIZE);
- sha_ctx->diglen = SHA1_DIGEST_SIZE;
- if (cp->ce_support.sha_hmac)
- sha_ctx->alg = QCE_HASH_SHA1_HMAC;
- else {
- sha_ctx->alg = QCE_HASH_SHA1;
- ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
- }
- return ret;
- }
- static int _sha256_hmac_init(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = sha_ctx->cp;
- struct crypto_stat *pstat;
- int ret = 0;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- pstat = &_qcrypto_stat;
- pstat->sha256_hmac_digest++;
- _sha_init(req);
- memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
- memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
- SHA256_DIGEST_SIZE);
- sha_ctx->diglen = SHA256_DIGEST_SIZE;
- if (cp->ce_support.sha_hmac)
- sha_ctx->alg = QCE_HASH_SHA256_HMAC;
- else {
- sha_ctx->alg = QCE_HASH_SHA256;
- ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
- }
- return ret;
- }
- static int _sha1_hmac_update(struct ahash_request *req)
- {
- return _sha1_update(req);
- }
- static int _sha256_hmac_update(struct ahash_request *req)
- {
- return _sha256_update(req);
- }
- static int _sha_hmac_outer_hash(struct ahash_request *req,
- uint32_t sha_digest_size, uint32_t sha_block_size)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_priv *cp = sha_ctx->cp;
- int i;
- uint8_t *staging;
- uint8_t *p;
- staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
- L1_CACHE_BYTES);
- p = staging;
- for (i = 0; i < sha_block_size; i++)
- *p++ = sha_ctx->authkey[i] ^ 0x5c;
- memcpy(p, &rctx->digest[0], sha_digest_size);
- sg_set_buf(&rctx->sg[0], staging, sha_block_size +
- sha_digest_size);
- sg_mark_end(&rctx->sg[0]);
- /* save the original req structure fields */
- rctx->src = req->src;
- rctx->nbytes = req->nbytes;
- req->src = &rctx->sg[0];
- req->nbytes = sha_block_size + sha_digest_size;
- _sha_init(req);
- if (sha_ctx->alg == QCE_HASH_SHA1) {
- memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
- SHA1_DIGEST_SIZE);
- sha_ctx->diglen = SHA1_DIGEST_SIZE;
- } else {
- memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
- SHA256_DIGEST_SIZE);
- sha_ctx->diglen = SHA256_DIGEST_SIZE;
- }
- rctx->last_blk = 1;
- return _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
- }
- static int _sha_hmac_inner_hash(struct ahash_request *req,
- uint32_t sha_digest_size, uint32_t sha_block_size)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct ahash_request *areq = sha_ctx->ahash_req;
- struct crypto_priv *cp = sha_ctx->cp;
- int ret = 0;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- uint8_t *staging;
- staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
- L1_CACHE_BYTES);
- memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
- sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
- sg_mark_end(&rctx->sg[0]);
- ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0],
- rctx->trailing_buf_len);
- rctx->last_blk = 1;
- ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base);
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- ret =
- wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
- reinit_completion(&sha_ctx->ahash_req_complete);
- }
- return ret;
- }
- static int _sha1_hmac_final(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = sha_ctx->cp;
- int ret = 0;
- if (cp->ce_support.sha_hmac)
- return _sha_final(req, SHA1_BLOCK_SIZE);
- ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
- if (ret)
- return ret;
- return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
- }
- static int _sha256_hmac_final(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = sha_ctx->cp;
- int ret = 0;
- if (cp->ce_support.sha_hmac)
- return _sha_final(req, SHA256_BLOCK_SIZE);
- ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
- if (ret)
- return ret;
- return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
- }
- static int _sha1_hmac_digest(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_stat *pstat;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- pstat = &_qcrypto_stat;
- pstat->sha1_hmac_digest++;
- _sha_init(req);
- memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
- SHA1_DIGEST_SIZE);
- sha_ctx->diglen = SHA1_DIGEST_SIZE;
- sha_ctx->alg = QCE_HASH_SHA1_HMAC;
- return _sha_digest(req);
- }
- static int _sha256_hmac_digest(struct ahash_request *req)
- {
- struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_stat *pstat;
- struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
- pstat = &_qcrypto_stat;
- pstat->sha256_hmac_digest++;
- _sha_init(req);
- memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
- SHA256_DIGEST_SIZE);
- sha_ctx->diglen = SHA256_DIGEST_SIZE;
- sha_ctx->alg = QCE_HASH_SHA256_HMAC;
- return _sha_digest(req);
- }
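- /*
- * Prepend "qcom-" to a cra_name, e.g. "cbc(aes)" becomes
- * "qcom-cbc(aes)". Used at probe time when a use_sw_* capability flag
- * is set, so that the generic cra_name is not claimed by this driver.
- * Fails if the prefixed name would exceed CRYPTO_MAX_ALG_NAME.
- */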
- static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size)
- {
- char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-";
- if (size >= CRYPTO_MAX_ALG_NAME - strlen("qcom-"))
- return -EINVAL;
- strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
- strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
- return 0;
- }
- int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_engine *pengine = NULL;
- pengine = _qrypto_find_pengine_device(cp, dev);
- if (pengine == NULL)
- return -ENODEV;
- ctx->pengine = pengine;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_cipher_set_device);
- int qcrypto_cipher_set_device_hw(struct skcipher_request *req, u32 dev,
- u32 hw_inst)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_engine *pengine = NULL;
- pengine = _qrypto_find_pengine_device_hw(cp, dev, hw_inst);
- if (pengine == NULL)
- return -ENODEV;
- ctx->pengine = pengine;
- return 0;
- }
- EXPORT_SYMBOL(qcrypto_cipher_set_device_hw);
- int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_engine *pengine = NULL;
- pengine = _qrypto_find_pengine_device(cp, dev);
- if (pengine == NULL)
- return -ENODEV;
- ctx->pengine = pengine;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_aead_set_device);
- int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev)
- {
- struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- struct crypto_engine *pengine = NULL;
- pengine = _qrypto_find_pengine_device(cp, dev);
- if (pengine == NULL)
- return -ENODEV;
- ctx->pengine = pengine;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_ahash_set_device);
- int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
- (cp->platform_support.hw_key_support == false)) {
- pr_err("%s HW key usage not supported\n", __func__);
- return -EINVAL;
- }
- if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
- QCRYPTO_CTX_KEY_MASK) {
- pr_err("%s Cannot set all key flags\n", __func__);
- return -EINVAL;
- }
- ctx->flags |= flags;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_cipher_set_flag);
- int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
- (cp->platform_support.hw_key_support == false)) {
- pr_err("%s HW key usage not supported\n", __func__);
- return -EINVAL;
- }
- if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
- QCRYPTO_CTX_KEY_MASK) {
- pr_err("%s Cannot set all key flags\n", __func__);
- return -EINVAL;
- }
- ctx->flags |= flags;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_aead_set_flag);
- int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags)
- {
- struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct crypto_priv *cp = ctx->cp;
- if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
- (cp->platform_support.hw_key_support == false)) {
- pr_err("%s HW key usage not supported\n", __func__);
- return -EINVAL;
- }
- if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
- QCRYPTO_CTX_KEY_MASK) {
- pr_err("%s Cannot set all key flags\n", __func__);
- return -EINVAL;
- }
- ctx->flags |= flags;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_ahash_set_flag);
- int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
- unsigned int flags)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- ctx->flags &= ~flags;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_cipher_clear_flag);
- int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags)
- {
- struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- ctx->flags &= ~flags;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_aead_clear_flag);
- int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags)
- {
- struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- ctx->flags &= ~flags;
- return 0;
- };
- EXPORT_SYMBOL(qcrypto_ahash_clear_flag);
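- /* Algorithm templates registered with the kernel crypto API at probe time. */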
- static struct ahash_alg _qcrypto_ahash_algos[] = {
- {
- .init = _sha1_init,
- .update = _sha1_update,
- .final = _sha1_final,
- .export = _sha1_export,
- .import = _sha1_import,
- .digest = _sha1_digest,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "qcrypto-sha1",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize =
- sizeof(struct qcrypto_sha_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ahash_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_ahash_cra_init,
- .cra_exit = _qcrypto_ahash_cra_exit,
- },
- },
- },
- {
- .init = _sha256_init,
- .update = _sha256_update,
- .final = _sha256_final,
- .export = _sha256_export,
- .import = _sha256_import,
- .digest = _sha256_digest,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "qcrypto-sha256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize =
- sizeof(struct qcrypto_sha_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ahash_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_ahash_cra_init,
- .cra_exit = _qcrypto_ahash_cra_exit,
- },
- },
- },
- };
- static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
- {
- .init = _sha1_hmac_init,
- .update = _sha1_hmac_update,
- .final = _sha1_hmac_final,
- .export = _sha1_hmac_export,
- .import = _sha1_hmac_import,
- .digest = _sha1_hmac_digest,
- .setkey = _sha1_hmac_setkey,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "qcrypto-hmac-sha1",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize =
- sizeof(struct qcrypto_sha_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ahash_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_ahash_hmac_cra_init,
- .cra_exit = _qcrypto_ahash_cra_exit,
- },
- },
- },
- {
- .init = _sha256_hmac_init,
- .update = _sha256_hmac_update,
- .final = _sha256_hmac_final,
- .export = _sha256_hmac_export,
- .import = _sha256_hmac_import,
- .digest = _sha256_hmac_digest,
- .setkey = _sha256_hmac_setkey,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "qcrypto-hmac-sha256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize =
- sizeof(struct qcrypto_sha_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ahash_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_ahash_hmac_cra_init,
- .cra_exit = _qcrypto_ahash_cra_exit,
- },
- },
- },
- };
- static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
- {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "qcrypto-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aes_ablkcipher_init,
- .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = _qcrypto_setkey_aes,
- .encrypt = _qcrypto_enc_aes_ecb,
- .decrypt = _qcrypto_dec_aes_ecb,
- },
- },
- },
- {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "qcrypto-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aes_ablkcipher_init,
- .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = _qcrypto_setkey_aes,
- .encrypt = _qcrypto_enc_aes_cbc,
- .decrypt = _qcrypto_dec_aes_cbc,
- },
- },
- },
- {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "qcrypto-ctr-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_aes_ablkcipher_init,
- .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = _qcrypto_setkey_aes,
- .encrypt = _qcrypto_enc_aes_ctr,
- .decrypt = _qcrypto_dec_aes_ctr,
- },
- },
- },
- {
- .cra_name = "ecb(des)",
- .cra_driver_name = "qcrypto-ecb-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_ablkcipher_init,
- .cra_exit = _qcrypto_cra_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = _qcrypto_setkey_des,
- .encrypt = _qcrypto_enc_des_ecb,
- .decrypt = _qcrypto_dec_des_ecb,
- },
- },
- },
- {
- .cra_name = "cbc(des)",
- .cra_driver_name = "qcrypto-cbc-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_ablkcipher_init,
- .cra_exit = _qcrypto_cra_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = _qcrypto_setkey_des,
- .encrypt = _qcrypto_enc_des_cbc,
- .decrypt = _qcrypto_dec_des_cbc,
- },
- },
- },
- {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "qcrypto-ecb-3des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_ablkcipher_init,
- .cra_exit = _qcrypto_cra_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = _qcrypto_setkey_3des,
- .encrypt = _qcrypto_enc_3des_ecb,
- .decrypt = _qcrypto_dec_3des_ecb,
- },
- },
- },
- {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "qcrypto-cbc-3des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_ablkcipher_init,
- .cra_exit = _qcrypto_cra_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = _qcrypto_setkey_3des,
- .encrypt = _qcrypto_enc_3des_cbc,
- .decrypt = _qcrypto_dec_3des_cbc,
- },
- },
- },
- };
- static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "qcrypto-xts-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = _qcrypto_cra_ablkcipher_init,
- .cra_exit = _qcrypto_cra_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = _qcrypto_setkey_aes_xts,
- .encrypt = _qcrypto_enc_aes_xts,
- .decrypt = _qcrypto_dec_aes_xts,
- },
- },
- };
- static struct aead_alg _qcrypto_aead_sha1_hmac_algos[] = {
- {
- .base = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .setkey = _qcrypto_aead_setkey,
- .setauthsize = _qcrypto_aead_setauthsize,
- .encrypt = _qcrypto_aead_encrypt_aes_cbc,
- .decrypt = _qcrypto_aead_decrypt_aes_cbc,
- .init = _qcrypto_cra_aead_aes_sha1_init,
- .exit = _qcrypto_cra_aead_aes_exit,
- },
- {
- .base = {
- .cra_name = "authenc(hmac(sha1),cbc(des))",
- .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .ivsize = DES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .setkey = _qcrypto_aead_setkey,
- .setauthsize = _qcrypto_aead_setauthsize,
- .encrypt = _qcrypto_aead_encrypt_des_cbc,
- .decrypt = _qcrypto_aead_decrypt_des_cbc,
- .init = _qcrypto_cra_aead_sha1_init,
- .exit = _qcrypto_cra_aead_exit,
- },
- {
- .base = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .setkey = _qcrypto_aead_setkey,
- .setauthsize = _qcrypto_aead_setauthsize,
- .encrypt = _qcrypto_aead_encrypt_3des_cbc,
- .decrypt = _qcrypto_aead_decrypt_3des_cbc,
- .init = _qcrypto_cra_aead_sha1_init,
- .exit = _qcrypto_cra_aead_exit,
- },
- };
- static struct aead_alg _qcrypto_aead_sha256_hmac_algos[] = {
- {
- .base = {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .setkey = _qcrypto_aead_setkey,
- .setauthsize = _qcrypto_aead_setauthsize,
- .encrypt = _qcrypto_aead_encrypt_aes_cbc,
- .decrypt = _qcrypto_aead_decrypt_aes_cbc,
- .init = _qcrypto_cra_aead_aes_sha256_init,
- .exit = _qcrypto_cra_aead_aes_exit,
- },
- {
- .base = {
- .cra_name = "authenc(hmac(sha256),cbc(des))",
- .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .ivsize = DES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .setkey = _qcrypto_aead_setkey,
- .setauthsize = _qcrypto_aead_setauthsize,
- .encrypt = _qcrypto_aead_encrypt_des_cbc,
- .decrypt = _qcrypto_aead_decrypt_des_cbc,
- .init = _qcrypto_cra_aead_sha256_init,
- .exit = _qcrypto_cra_aead_exit,
- },
- {
- .base = {
- .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
- .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-3des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .setkey = _qcrypto_aead_setkey,
- .setauthsize = _qcrypto_aead_setauthsize,
- .encrypt = _qcrypto_aead_encrypt_3des_cbc,
- .decrypt = _qcrypto_aead_decrypt_3des_cbc,
- .init = _qcrypto_cra_aead_sha256_init,
- .exit = _qcrypto_cra_aead_exit,
- },
- };
- static struct aead_alg _qcrypto_aead_ccm_algo = {
- .base = {
- .cra_name = "ccm(aes)",
- .cra_driver_name = "qcrypto-aes-ccm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = _qcrypto_aead_ccm_setkey,
- .setauthsize = _qcrypto_aead_ccm_setauthsize,
- .encrypt = _qcrypto_aead_encrypt_aes_ccm,
- .decrypt = _qcrypto_aead_decrypt_aes_ccm,
- .init = _qcrypto_cra_aead_ccm_init,
- .exit = _qcrypto_cra_aead_exit,
- };
- static struct aead_alg _qcrypto_aead_rfc4309_ccm_algo = {
- .base = {
- .cra_name = "rfc4309(ccm(aes))",
- .cra_driver_name = "qcrypto-rfc4309-aes-ccm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .ivsize = 8,
- .maxauthsize = 16,
- .setkey = _qcrypto_aead_rfc4309_ccm_setkey,
- .setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
- .encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
- .decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
- .init = _qcrypto_cra_aead_rfc4309_ccm_init,
- .exit = _qcrypto_cra_aead_exit,
- };
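- /*
- * Probe: vote for bus bandwidth, open the QCE device, add the engine
- * to the global engine list, then register the supported algorithms
- * (registration happens only for the first engine probed).
- */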
- static int _qcrypto_probe(struct platform_device *pdev)
- {
- int rc = 0;
- void *handle;
- struct crypto_priv *cp = &qcrypto_dev;
- int i;
- struct msm_ce_hw_support *platform_support;
- struct crypto_engine *pengine;
- unsigned long flags;
- struct qcrypto_req_control *pqcrypto_req_control = NULL;
- pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
- if (!pengine)
- return -ENOMEM;
- cp->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
- msm_bus_cl_get_pdata(pdev);
- if (!cp->platform_support.bus_scale_table) {
- dev_err(&pdev->dev, "bus_scale_table is NULL\n");
- pengine->bw_state = BUS_HAS_BANDWIDTH;
- } else {
- pengine->bus_scale_handle = msm_bus_scale_register_client(
- (struct msm_bus_scale_pdata *)
- cp->platform_support.bus_scale_table);
- if (!pengine->bus_scale_handle) {
- dev_err(&pdev->dev, "failed to get bus scale handle\n");
- rc = -ENOMEM;
- goto exit_kzfree;
- }
- pengine->bw_state = BUS_NO_BANDWIDTH;
- }
- rc = msm_bus_scale_client_update_request(pengine->bus_scale_handle, 1);
- if (rc) {
- dev_err(&pdev->dev, "failed to set high bandwidth\n");
- goto exit_kzfree;
- }
- handle = qce_open(pdev, &rc);
- if (handle == NULL) {
- rc = -ENODEV;
- goto exit_free_pdata;
- }
- rc = msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0);
- if (rc) {
- dev_err(&pdev->dev, "failed to set low bandwidth\n");
- goto exit_qce_close;
- }
- platform_set_drvdata(pdev, pengine);
- pengine->qce = handle;
- pengine->pcp = cp;
- pengine->pdev = pdev;
- pengine->signature = 0xdeadbeef;
- init_timer(&(pengine->bw_reaper_timer));
- INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work);
- pengine->bw_reaper_timer.function =
- qcrypto_bw_reaper_timer_callback;
- INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work);
- pengine->high_bw_req = false;
- pengine->active_seq = 0;
- pengine->last_active_seq = 0;
- pengine->check_flag = false;
- pengine->max_req_used = 0;
- pengine->issue_req = false;
- crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
- mutex_lock(&cp->engine_lock);
- cp->total_units++;
- pengine->unit = cp->total_units;
- spin_lock_irqsave(&cp->lock, flags);
- pengine->first_engine = list_empty(&cp->engine_list);
- if (pengine->first_engine)
- cp->first_engine = pengine;
- list_add_tail(&pengine->elist, &cp->engine_list);
- cp->next_engine = pengine;
- spin_unlock_irqrestore(&cp->lock, flags);
- qce_hw_support(pengine->qce, &cp->ce_support);
- pengine->ce_hw_instance = cp->ce_support.ce_hw_instance;
- pengine->max_req = cp->ce_support.max_request;
- pqcrypto_req_control = kzalloc(sizeof(struct qcrypto_req_control) *
- pengine->max_req, GFP_KERNEL);
- if (pqcrypto_req_control == NULL) {
- rc = -ENOMEM;
- goto exit_unlock_mutex;
- }
- qcrypto_init_req_control(pengine, pqcrypto_req_control);
- if (cp->ce_support.bam) {
- cp->platform_support.ce_shared = cp->ce_support.is_shared;
- cp->platform_support.shared_ce_resource = 0;
- cp->platform_support.hw_key_support = cp->ce_support.hw_key;
- cp->platform_support.sha_hmac = 1;
- pengine->ce_device = cp->ce_support.ce_device;
- } else {
- platform_support =
- (struct msm_ce_hw_support *)pdev->dev.platform_data;
- cp->platform_support.ce_shared = platform_support->ce_shared;
- cp->platform_support.shared_ce_resource =
- platform_support->shared_ce_resource;
- cp->platform_support.hw_key_support =
- platform_support->hw_key_support;
- cp->platform_support.sha_hmac = platform_support->sha_hmac;
- }
- if (cp->total_units != 1)
- goto exit_unlock_mutex;
- /* register crypto cipher algorithms the device supports */
- for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
- struct qcrypto_alg *q_alg;
- q_alg = _qcrypto_cipher_alg_alloc(cp,
- &_qcrypto_ablk_cipher_algos[i]);
- if (IS_ERR(q_alg)) {
- rc = PTR_ERR(q_alg);
- goto err;
- }
- if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
- rc = _qcrypto_prefix_alg_cra_name(
- q_alg->cipher_alg.cra_name,
- strlen(q_alg->cipher_alg.cra_name));
- if (rc) {
- dev_err(&pdev->dev,
- "The algorithm name %s is too long.\n",
- q_alg->cipher_alg.cra_name);
- kfree(q_alg);
- goto err;
- }
- }
- rc = crypto_register_alg(&q_alg->cipher_alg);
- if (rc) {
- dev_err(&pdev->dev, "%s alg registration failed\n",
- q_alg->cipher_alg.cra_driver_name);
- kzfree(q_alg);
- } else {
- list_add_tail(&q_alg->entry, &cp->alg_list);
- dev_info(&pdev->dev, "%s\n",
- q_alg->cipher_alg.cra_driver_name);
- }
- }
- /* register crypto cipher algorithms the device supports */
- if (cp->ce_support.aes_xts) {
- struct qcrypto_alg *q_alg;
- q_alg = _qcrypto_cipher_alg_alloc(cp,
- &_qcrypto_ablk_cipher_xts_algo);
- if (IS_ERR(q_alg)) {
- rc = PTR_ERR(q_alg);
- goto err;
- }
- if (cp->ce_support.use_sw_aes_xts_algo) {
- rc = _qcrypto_prefix_alg_cra_name(
- q_alg->cipher_alg.cra_name,
- strlen(q_alg->cipher_alg.cra_name));
- if (rc) {
- dev_err(&pdev->dev,
- "The algorithm name %s is too long.\n",
- q_alg->cipher_alg.cra_name);
- kfree(q_alg);
- goto err;
- }
- }
- rc = crypto_register_alg(&q_alg->cipher_alg);
- if (rc) {
- dev_err(&pdev->dev, "%s alg registration failed\n",
- q_alg->cipher_alg.cra_driver_name);
- kzfree(q_alg);
- } else {
- list_add_tail(&q_alg->entry, &cp->alg_list);
- dev_info(&pdev->dev, "%s\n",
- q_alg->cipher_alg.cra_driver_name);
- }
- }
- /*
- * Register crypto hash (sha1 and sha256) algorithms the
- * device supports
- */
- for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
- struct qcrypto_alg *q_alg = NULL;
- q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
- if (IS_ERR(q_alg)) {
- rc = PTR_ERR(q_alg);
- goto err;
- }
- if (cp->ce_support.use_sw_ahash_algo) {
- rc = _qcrypto_prefix_alg_cra_name(
- q_alg->sha_alg.halg.base.cra_name,
- strlen(q_alg->sha_alg.halg.base.cra_name));
- if (rc) {
- dev_err(&pdev->dev,
- "The algorithm name %s is too long.\n",
- q_alg->sha_alg.halg.base.cra_name);
- kfree(q_alg);
- goto err;
- }
- }
- rc = crypto_register_ahash(&q_alg->sha_alg);
- if (rc) {
- dev_err(&pdev->dev, "%s alg registration failed\n",
- q_alg->sha_alg.halg.base.cra_driver_name);
- kzfree(q_alg);
- } else {
- list_add_tail(&q_alg->entry, &cp->alg_list);
- dev_info(&pdev->dev, "%s\n",
- q_alg->sha_alg.halg.base.cra_driver_name);
- }
- }
- /* register crypto aead (hmac-sha1) algorithms the device supports */
- if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac
- || cp->ce_support.sha_hmac) {
- for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
- i++) {
- struct qcrypto_alg *q_alg;
- q_alg = _qcrypto_aead_alg_alloc(cp,
- &_qcrypto_aead_sha1_hmac_algos[i]);
- if (IS_ERR(q_alg)) {
- rc = PTR_ERR(q_alg);
- goto err;
- }
- if (cp->ce_support.use_sw_aead_algo) {
- rc = _qcrypto_prefix_alg_cra_name(
- q_alg->aead_alg.base.cra_name,
- strlen(q_alg->aead_alg.base.cra_name));
- if (rc) {
- dev_err(&pdev->dev,
- "The algorithm name %s is too long.\n",
- q_alg->aead_alg.base.cra_name);
- kfree(q_alg);
- goto err;
- }
- }
- rc = crypto_register_aead(&q_alg->aead_alg);
- if (rc) {
- dev_err(&pdev->dev,
- "%s alg registration failed\n",
- q_alg->aead_alg.base.cra_driver_name);
- kfree(q_alg);
- } else {
- list_add_tail(&q_alg->entry, &cp->alg_list);
- dev_info(&pdev->dev, "%s\n",
- q_alg->aead_alg.base.cra_driver_name);
- }
- }
- }
- /* register crypto aead (hmac-sha256) algorithms the device supports */
- if (cp->ce_support.sha_hmac) {
- for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha256_hmac_algos);
- i++) {
- struct qcrypto_alg *q_alg;
- q_alg = _qcrypto_aead_alg_alloc(cp,
- &_qcrypto_aead_sha256_hmac_algos[i]);
- if (IS_ERR(q_alg)) {
- rc = PTR_ERR(q_alg);
- goto err;
- }
- if (cp->ce_support.use_sw_aead_algo) {
- rc = _qcrypto_prefix_alg_cra_name(
- q_alg->aead_alg.base.cra_name,
- strlen(q_alg->aead_alg.base.cra_name));
- if (rc) {
- dev_err(&pdev->dev,
- "The algorithm name %s is too long.\n",
- q_alg->aead_alg.base.cra_name);
- kfree(q_alg);
- goto err;
- }
- }
- rc = crypto_register_aead(&q_alg->aead_alg);
- if (rc) {
- dev_err(&pdev->dev,
- "%s alg registration failed\n",
- q_alg->aead_alg.base.cra_driver_name);
- kfree(q_alg);
- } else {
- list_add_tail(&q_alg->entry, &cp->alg_list);
- dev_info(&pdev->dev, "%s\n",
- q_alg->aead_alg.base.cra_driver_name);
- }
- }
- }
- if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
- /* register crypto hmac algorithms the device supports */
- for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
- struct qcrypto_alg *q_alg = NULL;
- q_alg = _qcrypto_sha_alg_alloc(cp,
- &_qcrypto_sha_hmac_algos[i]);
- if (IS_ERR(q_alg)) {
- rc = PTR_ERR(q_alg);
- goto err;
- }
- if (cp->ce_support.use_sw_hmac_algo) {
- rc = _qcrypto_prefix_alg_cra_name(
- q_alg->sha_alg.halg.base.cra_name,
- strlen(
- q_alg->sha_alg.halg.base.cra_name));
- if (rc) {
- dev_err(&pdev->dev,
- "The algorithm name %s is too long.\n",
- q_alg->sha_alg.halg.base.cra_name);
- kfree(q_alg);
- goto err;
- }
- }
- rc = crypto_register_ahash(&q_alg->sha_alg);
- if (rc) {
- dev_err(&pdev->dev,
- "%s alg registration failed\n",
- q_alg->sha_alg.halg.base.cra_driver_name);
- kzfree(q_alg);
- } else {
- list_add_tail(&q_alg->entry, &cp->alg_list);
- dev_info(&pdev->dev, "%s\n",
- q_alg->sha_alg.halg.base.cra_driver_name);
- }
- }
- }
- /*
- * Register crypto cipher (aes-ccm) algorithms the
- * device supports
- */
- if (cp->ce_support.aes_ccm) {
- struct qcrypto_alg *q_alg;
- q_alg = _qcrypto_aead_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
- if (IS_ERR(q_alg)) {
- rc = PTR_ERR(q_alg);
- goto err;
- }
- if (cp->ce_support.use_sw_aes_ccm_algo) {
- rc = _qcrypto_prefix_alg_cra_name(
- q_alg->aead_alg.base.cra_name,
- strlen(q_alg->aead_alg.base.cra_name));
- if (rc) {
- dev_err(&pdev->dev,
- "The algorithm name %s is too long.\n",
- q_alg->aead_alg.base.cra_name);
- kfree(q_alg);
- goto err;
- }
- }
- rc = crypto_register_aead(&q_alg->aead_alg);
- if (rc) {
- dev_err(&pdev->dev, "%s alg registration failed\n",
- q_alg->aead_alg.base.cra_driver_name);
- kzfree(q_alg);
- } else {
- list_add_tail(&q_alg->entry, &cp->alg_list);
- dev_info(&pdev->dev, "%s\n",
- q_alg->aead_alg.base.cra_driver_name);
- }
- q_alg = _qcrypto_aead_alg_alloc(cp,
- &_qcrypto_aead_rfc4309_ccm_algo);
- if (IS_ERR(q_alg)) {
- rc = PTR_ERR(q_alg);
- goto err;
- }
- if (cp->ce_support.use_sw_aes_ccm_algo) {
- rc = _qcrypto_prefix_alg_cra_name(
- q_alg->aead_alg.base.cra_name,
- strlen(q_alg->aead_alg.base.cra_name));
- if (rc) {
- dev_err(&pdev->dev,
- "The algorithm name %s is too long.\n",
- q_alg->aead_alg.base.cra_name);
- kfree(q_alg);
- goto err;
- }
- }
- rc = crypto_register_aead(&q_alg->aead_alg);
- if (rc) {
- dev_err(&pdev->dev, "%s alg registration failed\n",
- q_alg->aead_alg.base.cra_driver_name);
- kfree(q_alg);
- } else {
- list_add_tail(&q_alg->entry, &cp->alg_list);
- dev_info(&pdev->dev, "%s\n",
- q_alg->aead_alg.base.cra_driver_name);
- }
- }
- mutex_unlock(&cp->engine_lock);
- return 0;
- err:
- _qcrypto_remove_engine(pengine);
- kzfree(pqcrypto_req_control);
- exit_unlock_mutex:
- mutex_unlock(&cp->engine_lock);
- exit_qce_close:
- if (pengine->qce)
- qce_close(pengine->qce);
- exit_free_pdata:
- msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0);
- platform_set_drvdata(pdev, NULL);
- exit_kzfree:
- kzfree(pengine);
- return rc;
- };
- static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
- {
- struct crypto_priv *cp = pengine->pcp;
- if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen
- || cp->req_queue.qlen)
- return 1;
- return 0;
- }
- static void _qcrypto_do_suspending(struct crypto_engine *pengine)
- {
- struct crypto_priv *cp = pengine->pcp;
- if (cp->platform_support.bus_scale_table == NULL)
- return;
- del_timer_sync(&pengine->bw_reaper_timer);
- qcrypto_ce_set_bus(pengine, false);
- }
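- /*
- * Suspend is refused with -EBUSY while requests are pending, a high
- * bandwidth request is outstanding, or a bus transition is in flight;
- * otherwise the reaper timer is stopped and the bandwidth vote dropped
- * before the engine clocks are turned off.
- */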
- static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
- {
- int ret = 0;
- struct crypto_engine *pengine;
- struct crypto_priv *cp;
- unsigned long flags;
- pengine = platform_get_drvdata(pdev);
- if (!pengine)
- return -EINVAL;
- /*
- * Check if this platform supports clock management in suspend/resume
- * If not, just simply return 0.
- */
- cp = pengine->pcp;
- if (!cp->ce_support.clk_mgmt_sus_res)
- return 0;
- spin_lock_irqsave(&cp->lock, flags);
- switch (pengine->bw_state) {
- case BUS_NO_BANDWIDTH:
- if (pengine->high_bw_req == false)
- pengine->bw_state = BUS_SUSPENDED;
- else
- ret = -EBUSY;
- break;
- case BUS_HAS_BANDWIDTH:
- if (_qcrypto_engine_in_use(pengine)) {
- ret = -EBUSY;
- } else {
- pengine->bw_state = BUS_SUSPENDING;
- spin_unlock_irqrestore(&cp->lock, flags);
- _qcrypto_do_suspending(pengine);
- spin_lock_irqsave(&cp->lock, flags);
- pengine->bw_state = BUS_SUSPENDED;
- }
- break;
- case BUS_BANDWIDTH_RELEASING:
- case BUS_BANDWIDTH_ALLOCATING:
- case BUS_SUSPENDED:
- case BUS_SUSPENDING:
- default:
- ret = -EBUSY;
- break;
- }
- spin_unlock_irqrestore(&cp->lock, flags);
- if (ret)
- return ret;
- if (qce_pm_table.suspend) {
- qcrypto_ce_set_bus(pengine, true);
- qce_pm_table.suspend(pengine->qce);
- qcrypto_ce_set_bus(pengine, false);
- }
- return 0;
- }
- static int _qcrypto_resume(struct platform_device *pdev)
- {
- struct crypto_engine *pengine;
- struct crypto_priv *cp;
- unsigned long flags;
- int ret = 0;
- pengine = platform_get_drvdata(pdev);
- if (!pengine)
- return -EINVAL;
- cp = pengine->pcp;
- if (!cp->ce_support.clk_mgmt_sus_res)
- return 0;
- spin_lock_irqsave(&cp->lock, flags);
- if (pengine->bw_state == BUS_SUSPENDED) {
- spin_unlock_irqrestore(&cp->lock, flags);
- if (qce_pm_table.resume) {
- qcrypto_ce_set_bus(pengine, true);
- qce_pm_table.resume(pengine->qce);
- qcrypto_ce_set_bus(pengine, false);
- }
- spin_lock_irqsave(&cp->lock, flags);
- pengine->bw_state = BUS_NO_BANDWIDTH;
- pengine->active_seq++;
- pengine->check_flag = false;
- if (cp->req_queue.qlen || pengine->req_queue.qlen) {
- if (pengine->high_bw_req == false) {
- qcrypto_ce_bw_allocate_req(pengine);
- pengine->high_bw_req = true;
- }
- }
- } else
- ret = -EBUSY;
- spin_unlock_irqrestore(&cp->lock, flags);
- return ret;
- }
- static const struct of_device_id qcrypto_match[] = {
- { .compatible = "qcom,qcrypto",
- },
- {}
- };
- static struct platform_driver __qcrypto = {
- .probe = _qcrypto_probe,
- .remove = _qcrypto_remove,
- .suspend = _qcrypto_suspend,
- .resume = _qcrypto_resume,
- .driver = {
- .owner = THIS_MODULE,
- .name = "qcrypto",
- .of_match_table = qcrypto_match,
- },
- };
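- /* debugfs "qcrypto/stats-1": reading dumps the counters, any write resets them. */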
- static int _debug_qcrypto;
- static int _debug_stats_open(struct inode *inode, struct file *file)
- {
- file->private_data = inode->i_private;
- return 0;
- }
- static ssize_t _debug_stats_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
- {
- int rc = -EINVAL;
- int qcrypto = *((int *) file->private_data);
- int len;
- len = _disp_stats(qcrypto);
- if (len <= count)
- rc = simple_read_from_buffer((void __user *) buf, len,
- ppos, (void *) _debug_read_buf, len);
- return rc;
- }
- static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
- {
- unsigned long flags;
- struct crypto_priv *cp = &qcrypto_dev;
- struct crypto_engine *pe;
- int i;
- memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat));
- spin_lock_irqsave(&cp->lock, flags);
- list_for_each_entry(pe, &cp->engine_list, elist) {
- pe->total_req = 0;
- pe->err_req = 0;
- qce_clear_driver_stats(pe->qce);
- pe->max_req_used = 0;
- }
- cp->max_qlen = 0;
- cp->resp_start = 0;
- cp->resp_stop = 0;
- cp->no_avail = 0;
- cp->max_resp_qlen = 0;
- cp->queue_work_eng3 = 0;
- cp->queue_work_not_eng3 = 0;
- cp->queue_work_not_eng3_nz = 0;
- cp->max_reorder_cnt = 0;
- for (i = 0; i < MAX_SMP_CPU + 1; i++)
- cp->cpu_req[i] = 0;
- spin_unlock_irqrestore(&cp->lock, flags);
- return count;
- }
- static const struct file_operations _debug_stats_ops = {
- .open = _debug_stats_open,
- .read = _debug_stats_read,
- .write = _debug_stats_write,
- };
- static int _qcrypto_debug_init(void)
- {
- int rc;
- char name[DEBUG_MAX_FNAME];
- struct dentry *dent;
- _debug_dent = debugfs_create_dir("qcrypto", NULL);
- if (IS_ERR(_debug_dent)) {
- pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
- PTR_ERR(_debug_dent));
- return PTR_ERR(_debug_dent);
- }
- snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
- _debug_qcrypto = 0;
- dent = debugfs_create_file(name, 0644, _debug_dent,
- &_debug_qcrypto, &_debug_stats_ops);
- if (dent == NULL) {
- pr_err("qcrypto debugfs_create_file fail, error %ld\n",
- PTR_ERR(dent));
- rc = PTR_ERR(dent);
- goto err;
- }
- return 0;
- err:
- debugfs_remove_recursive(_debug_dent);
- return rc;
- }
- static int __init _qcrypto_init(void)
- {
- int rc;
- struct crypto_priv *pcp = &qcrypto_dev;
- rc = _qcrypto_debug_init();
- if (rc)
- return rc;
- INIT_LIST_HEAD(&pcp->alg_list);
- INIT_LIST_HEAD(&pcp->engine_list);
- init_llist_head(&pcp->ordered_resp_list);
- spin_lock_init(&pcp->lock);
- mutex_init(&pcp->engine_lock);
- pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq",
- WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
- if (!pcp->resp_wq) {
- pr_err("Error allocating workqueue\n");
- return -ENOMEM;
- }
- INIT_WORK(&pcp->resp_work, seq_response);
- pcp->total_units = 0;
- pcp->platform_support.bus_scale_table = NULL;
- pcp->next_engine = NULL;
- pcp->scheduled_eng = NULL;
- pcp->ce_req_proc_sts = IN_PROGRESS;
- crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
- return platform_driver_register(&__qcrypto);
- }
- static void __exit _qcrypto_exit(void)
- {
- pr_debug("%s Unregister QCRYPTO\n", __func__);
- debugfs_remove_recursive(_debug_dent);
- platform_driver_unregister(&__qcrypto);
- }
- module_init(_qcrypto_init);
- module_exit(_qcrypto_exit);
- MODULE_LICENSE("GPL v2");
- MODULE_DESCRIPTION("QTI Crypto driver");
|