- /*
- * Copyright © 2006-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * Authors: David Woodhouse <[email protected]>,
- * Ashok Raj <[email protected]>,
- * Shaohua Li <[email protected]>,
- * Anil S Keshavamurthy <[email protected]>,
- * Fenghua Yu <[email protected]>
- * Joerg Roedel <[email protected]>
- */
- #define pr_fmt(fmt) "DMAR: " fmt
- #include <linux/init.h>
- #include <linux/bitmap.h>
- #include <linux/debugfs.h>
- #include <linux/export.h>
- #include <linux/slab.h>
- #include <linux/irq.h>
- #include <linux/interrupt.h>
- #include <linux/spinlock.h>
- #include <linux/pci.h>
- #include <linux/dmar.h>
- #include <linux/dma-mapping.h>
- #include <linux/mempool.h>
- #include <linux/memory.h>
- #include <linux/cpu.h>
- #include <linux/timer.h>
- #include <linux/io.h>
- #include <linux/iova.h>
- #include <linux/iommu.h>
- #include <linux/intel-iommu.h>
- #include <linux/syscore_ops.h>
- #include <linux/tboot.h>
- #include <linux/dmi.h>
- #include <linux/pci-ats.h>
- #include <linux/memblock.h>
- #include <linux/dma-contiguous.h>
- #include <linux/crash_dump.h>
- #include <asm/irq_remapping.h>
- #include <asm/cacheflush.h>
- #include <asm/iommu.h>
- #include "irq_remapping.h"
- #define ROOT_SIZE VTD_PAGE_SIZE
- #define CONTEXT_SIZE VTD_PAGE_SIZE
- #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
- #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
- #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
- #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
- #define IOAPIC_RANGE_START (0xfee00000)
- #define IOAPIC_RANGE_END (0xfeefffff)
- #define IOVA_START_ADDR (0x1000)
- #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
- #define MAX_AGAW_WIDTH 64
- #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
- #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
- #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
- /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
- to match. That way, we can use 'unsigned long' for PFNs with impunity. */
- #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
- __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
- #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
- /* IO virtual address start page frame number */
- #define IOVA_START_PFN (1)
- #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
- #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
- #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
- /* page table handling */
- #define LEVEL_STRIDE (9)
- #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
- /*
- * This bitmap is used to advertise the page sizes our hardware supports
- * to the IOMMU core, which will then use this information to split
- * physically contiguous memory regions it is mapping into page sizes
- * that we support.
- *
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is a power-of-two multiple of the 4KiB
- * page size and that the mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all power-of-two page sizes of 4KiB and above.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
- */
- #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
- static inline int agaw_to_level(int agaw)
- {
- return agaw + 2;
- }
- static inline int agaw_to_width(int agaw)
- {
- return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
- }
- static inline int width_to_agaw(int width)
- {
- return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
- }
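- /*
- * Worked example (illustrative, derived from the helpers above): a
- * 4-level page table covers 48 bits of address space, and indeed
- * width_to_agaw(48) = DIV_ROUND_UP(48 - 30, 9) = 2, with
- * agaw_to_level(2) = 4 and agaw_to_width(2) = 30 + 2 * 9 = 48.
- * Likewise agaw 1 gives a 3-level table covering 39 bits.
- */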
- static inline unsigned int level_to_offset_bits(int level)
- {
- return (level - 1) * LEVEL_STRIDE;
- }
- static inline int pfn_level_offset(unsigned long pfn, int level)
- {
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
- }
- static inline unsigned long level_mask(int level)
- {
- return -1UL << level_to_offset_bits(level);
- }
- static inline unsigned long level_size(int level)
- {
- return 1UL << level_to_offset_bits(level);
- }
- static inline unsigned long align_to_level(unsigned long pfn, int level)
- {
- return (pfn + level_size(level) - 1) & level_mask(level);
- }
- static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
- {
- return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
- }
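- /*
- * Worked example (illustrative): at level 2, level_to_offset_bits()
- * is 9, so level_size(2) is 512 pfns (one 2MiB superpage worth of
- * 4KiB pages) and level_mask(2) clears the low 9 pfn bits.
- * lvl_to_nr_pages(2) likewise yields 512, the number of base pages
- * a single level-2 entry maps.
- */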
- /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
- are never going to work. */
- static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
- {
- return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
- }
- static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
- {
- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
- }
- static inline unsigned long page_to_dma_pfn(struct page *pg)
- {
- return mm_to_dma_pfn(page_to_pfn(pg));
- }
- static inline unsigned long virt_to_dma_pfn(void *p)
- {
- return page_to_dma_pfn(virt_to_page(p));
- }
- /* global iommu list, set NULL for ignored DMAR units */
- static struct intel_iommu **g_iommus;
- static void __init check_tylersburg_isoch(void);
- static int rwbf_quirk;
- /*
- * Set to 1 to panic the kernel if VT-d can't be enabled successfully
- * (used when the kernel is launched with TXT)
- */
- static int force_on = 0;
- /*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
- struct root_entry {
- u64 lo;
- u64 hi;
- };
- #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
- /*
- * Take a root_entry and return the Lower Context Table Pointer (LCTP)
- * if marked present.
- */
- static phys_addr_t root_entry_lctp(struct root_entry *re)
- {
- if (!(re->lo & 1))
- return 0;
- return re->lo & VTD_PAGE_MASK;
- }
- /*
- * Take a root_entry and return the Upper Context Table Pointer (UCTP)
- * if marked present.
- */
- static phys_addr_t root_entry_uctp(struct root_entry *re)
- {
- if (!(re->hi & 1))
- return 0;
- return re->hi & VTD_PAGE_MASK;
- }
- /*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: available
- * 8-23: domain id
- */
- struct context_entry {
- u64 lo;
- u64 hi;
- };
- static inline void context_clear_pasid_enable(struct context_entry *context)
- {
- context->lo &= ~(1ULL << 11);
- }
- static inline bool context_pasid_enabled(struct context_entry *context)
- {
- return !!(context->lo & (1ULL << 11));
- }
- static inline void context_set_copied(struct context_entry *context)
- {
- context->hi |= (1ull << 3);
- }
- static inline bool context_copied(struct context_entry *context)
- {
- return !!(context->hi & (1ULL << 3));
- }
- static inline bool __context_present(struct context_entry *context)
- {
- return (context->lo & 1);
- }
- static inline bool context_present(struct context_entry *context)
- {
- return context_pasid_enabled(context) ?
- __context_present(context) :
- __context_present(context) && !context_copied(context);
- }
- static inline void context_set_present(struct context_entry *context)
- {
- context->lo |= 1;
- }
- static inline void context_set_fault_enable(struct context_entry *context)
- {
- context->lo &= (((u64)-1) << 2) | 1;
- }
- static inline void context_set_translation_type(struct context_entry *context,
- unsigned long value)
- {
- context->lo &= (((u64)-1) << 4) | 3;
- context->lo |= (value & 3) << 2;
- }
- static inline void context_set_address_root(struct context_entry *context,
- unsigned long value)
- {
- context->lo &= ~VTD_PAGE_MASK;
- context->lo |= value & VTD_PAGE_MASK;
- }
- static inline void context_set_address_width(struct context_entry *context,
- unsigned long value)
- {
- context->hi |= value & 7;
- }
- static inline void context_set_domain_id(struct context_entry *context,
- unsigned long value)
- {
- context->hi |= (value & ((1 << 16) - 1)) << 8;
- }
- static inline int context_domain_id(struct context_entry *c)
- {
- return((c->hi >> 8) & 0xffff);
- }
- static inline void context_clear_entry(struct context_entry *context)
- {
- context->lo = 0;
- context->hi = 0;
- }
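- /*
- * Illustrative sketch (not a function in this file): the typical
- * order in which the helpers above compose a context entry. The
- * variables are placeholders; CONTEXT_TT_MULTI_LEVEL is defined in
- * <linux/intel-iommu.h>.
- *
- *	context_clear_entry(context);
- *	context_set_domain_id(context, did);
- *	context_set_address_width(context, agaw);
- *	context_set_address_root(context, virt_to_phys(pgd));
- *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
- *	context_set_fault_enable(context);
- *	context_set_present(context);
- */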
- /*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-10: available
- * 11: snoop behavior
- * 12-63: Host physical address
- */
- struct dma_pte {
- u64 val;
- };
- static inline void dma_clear_pte(struct dma_pte *pte)
- {
- pte->val = 0;
- }
- static inline u64 dma_pte_addr(struct dma_pte *pte)
- {
- #ifdef CONFIG_64BIT
- return pte->val & VTD_PAGE_MASK;
- #else
- /* Must have a full atomic 64-bit read */
- return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
- #endif
- }
- static inline bool dma_pte_present(struct dma_pte *pte)
- {
- return (pte->val & 3) != 0;
- }
- static inline bool dma_pte_superpage(struct dma_pte *pte)
- {
- return (pte->val & DMA_PTE_LARGE_PAGE);
- }
- static inline int first_pte_in_page(struct dma_pte *pte)
- {
- return !((unsigned long)pte & ~VTD_PAGE_MASK);
- }
- /*
- * This domain is a static identity mapping domain.
- * 1. This domain creates a static 1:1 mapping to all usable memory.
- * 2. It maps to each iommu if successful.
- * 3. Each iommu maps to this domain if successful.
- */
- static struct dmar_domain *si_domain;
- static int hw_pass_through = 1;
- /*
- * Domain represents a virtual machine; more than one device across
- * iommus may be owned by one domain, e.g. a kvm guest.
- */
- #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
- /* si_domain contains multiple devices */
- #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
- #define for_each_domain_iommu(idx, domain) \
- for (idx = 0; idx < g_num_of_iommus; idx++) \
- if (domain->iommu_refcnt[idx])
- struct dmar_domain {
- int nid; /* node id */
- unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
- /* Refcount of devices per iommu */
- u16 iommu_did[DMAR_UNITS_SUPPORTED];
- /* Domain ids per IOMMU. Use u16 since
- * domain ids are 16 bit wide according
- * to VT-d spec, section 9.3 */
- bool has_iotlb_device;
- struct list_head devices; /* all devices' list */
- struct iova_domain iovad; /* iova's that belong to this domain */
- struct dma_pte *pgd; /* virtual address */
- int gaw; /* max guest address width */
- /* adjusted guest address width, 0 is level 2 30-bit */
- int agaw;
- int flags; /* flags to find out type of domain */
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
- int iommu_superpage;/* Level of superpages supported:
- 0 == 4KiB (no superpages), 1 == 2MiB,
- 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- u64 max_addr; /* maximum mapped address */
- struct iommu_domain domain; /* generic domain data structure for
- iommu core */
- };
- /* PCI domain-device relationship */
- struct device_domain_info {
- struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
- u8 bus; /* PCI bus number */
- u8 devfn; /* PCI devfn number */
- u16 pfsid; /* SRIOV physical function source ID */
- u8 pasid_supported:3;
- u8 pasid_enabled:1;
- u8 pri_supported:1;
- u8 pri_enabled:1;
- u8 ats_supported:1;
- u8 ats_enabled:1;
- u8 ats_qdep;
- struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
- struct intel_iommu *iommu; /* IOMMU used by this device */
- struct dmar_domain *domain; /* pointer to domain */
- };
- struct dmar_rmrr_unit {
- struct list_head list; /* list of rmrr units */
- struct acpi_dmar_header *hdr; /* ACPI header */
- u64 base_address; /* reserved base address*/
- u64 end_address; /* reserved end address */
- struct dmar_dev_scope *devices; /* target devices */
- int devices_cnt; /* target device count */
- };
- struct dmar_atsr_unit {
- struct list_head list; /* list of ATSR units */
- struct acpi_dmar_header *hdr; /* ACPI header */
- struct dmar_dev_scope *devices; /* target devices */
- int devices_cnt; /* target device count */
- u8 include_all:1; /* include all ports */
- };
- static LIST_HEAD(dmar_atsr_units);
- static LIST_HEAD(dmar_rmrr_units);
- #define for_each_rmrr_units(rmrr) \
- list_for_each_entry(rmrr, &dmar_rmrr_units, list)
- static void flush_unmaps_timeout(unsigned long data);
- struct deferred_flush_entry {
- unsigned long iova_pfn;
- unsigned long nrpages;
- struct dmar_domain *domain;
- struct page *freelist;
- };
- #define HIGH_WATER_MARK 250
- struct deferred_flush_table {
- int next;
- struct deferred_flush_entry entries[HIGH_WATER_MARK];
- };
- struct deferred_flush_data {
- spinlock_t lock;
- int timer_on;
- struct timer_list timer;
- long size;
- struct deferred_flush_table *tables;
- };
- DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
- /* number of registered intel_iommus; bounds the g_iommus array */
- static int g_num_of_iommus;
- static void domain_exit(struct dmar_domain *domain);
- static void domain_remove_dev_info(struct dmar_domain *domain);
- static void dmar_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev);
- static void __dmar_remove_one_dev_info(struct device_domain_info *info);
- static void domain_context_clear(struct intel_iommu *iommu,
- struct device *dev);
- static int domain_detach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu);
- #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
- int dmar_disabled = 0;
- #else
- int dmar_disabled = 1;
- #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
- int intel_iommu_enabled = 0;
- EXPORT_SYMBOL_GPL(intel_iommu_enabled);
- static int dmar_map_gfx = 1;
- static int dmar_forcedac;
- static int intel_iommu_strict;
- static int intel_iommu_superpage = 1;
- static int intel_iommu_ecs = 1;
- static int intel_iommu_pasid28;
- static int iommu_identity_mapping;
- #define IDENTMAP_ALL 1
- #define IDENTMAP_GFX 2
- #define IDENTMAP_AZALIA 4
- /* Broadwell and Skylake have broken ECS support — normal so-called "second
- * level" translation of DMA requests-without-PASID doesn't actually happen
- * unless you also set the NESTE bit in an extended context-entry. Which of
- * course means that SVM doesn't work because it's trying to do nested
- * translation of the physical addresses it finds in the process page tables,
- * through the IOVA->phys mapping found in the "second level" page tables.
- *
- * The VT-d specification was retroactively changed to redefine the
- * capability bits and pretend that Broadwell/Skylake never happened...
- * but unfortunately the wrong bit was changed. It's ECS which is broken, but
- * for some reason it was the PASID capability bit which was redefined (from
- * bit 28 on BDW/SKL to bit 40 in future).
- *
- * So our test for ECS needs to eschew those implementations which set the old
- * PASID capability bit 28, since those are the ones on which ECS is broken.
- * Unless we are working around the 'pasid28' limitations, that is, by putting
- * the device into passthrough mode for normal DMA and thus masking the bug.
- */
- #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
- (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
- /* PASID support is thus enabled if ECS is enabled and *either* of the old
- * or new capability bits are set. */
- #define pasid_enabled(iommu) (ecs_enabled(iommu) && \
- (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
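- /*
- * Spelling out the two macros above for the common cases (assuming
- * intel_iommu_ecs is left at its default of 1):
- * - ecap_ecs clear: ECS and PASID are both disabled.
- * - ecap_ecs set, broken_pasid clear: ECS is enabled; PASID is
- *   enabled iff ecap_pasid is set.
- * - ecap_ecs set, broken_pasid set: ECS is disabled, unless pasid28
- *   was given on the command line, in which case both are enabled
- *   and graphics is identity-mapped to mask the erratum.
- */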
- int intel_iommu_gfx_mapped;
- EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
- #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
- static DEFINE_SPINLOCK(device_domain_lock);
- static LIST_HEAD(device_domain_list);
- static const struct iommu_ops intel_iommu_ops;
- static bool translation_pre_enabled(struct intel_iommu *iommu)
- {
- return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
- }
- static void clear_translation_pre_enabled(struct intel_iommu *iommu)
- {
- iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
- }
- static void init_translation_status(struct intel_iommu *iommu)
- {
- u32 gsts;
- gsts = readl(iommu->reg + DMAR_GSTS_REG);
- if (gsts & DMA_GSTS_TES)
- iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
- }
- /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
- static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
- {
- return container_of(dom, struct dmar_domain, domain);
- }
- static int __init intel_iommu_setup(char *str)
- {
- if (!str)
- return -EINVAL;
- while (*str) {
- if (!strncmp(str, "on", 2)) {
- dmar_disabled = 0;
- pr_info("IOMMU enabled\n");
- } else if (!strncmp(str, "off", 3)) {
- dmar_disabled = 1;
- pr_info("IOMMU disabled\n");
- } else if (!strncmp(str, "igfx_off", 8)) {
- dmar_map_gfx = 0;
- pr_info("Disable GFX device mapping\n");
- } else if (!strncmp(str, "forcedac", 8)) {
- pr_info("Forcing DAC for PCI devices\n");
- dmar_forcedac = 1;
- } else if (!strncmp(str, "strict", 6)) {
- pr_info("Disable batched IOTLB flush\n");
- intel_iommu_strict = 1;
- } else if (!strncmp(str, "sp_off", 6)) {
- pr_info("Disable supported super page\n");
- intel_iommu_superpage = 0;
- } else if (!strncmp(str, "ecs_off", 7)) {
- printk(KERN_INFO
- "Intel-IOMMU: disable extended context table support\n");
- intel_iommu_ecs = 0;
- } else if (!strncmp(str, "pasid28", 7)) {
- printk(KERN_INFO
- "Intel-IOMMU: enable pre-production PASID support\n");
- intel_iommu_pasid28 = 1;
- iommu_identity_mapping |= IDENTMAP_GFX;
- }
- str += strcspn(str, ",");
- while (*str == ',')
- str++;
- }
- return 0;
- }
- __setup("intel_iommu=", intel_iommu_setup);
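- /*
- * Usage example (illustrative): the parser above takes a
- * comma-separated list, so booting with
- *
- *	intel_iommu=on,strict,sp_off
- *
- * enables the IOMMU, disables batched IOTLB flushing and turns off
- * superpage support in a single option string.
- */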
- static struct kmem_cache *iommu_domain_cache;
- static struct kmem_cache *iommu_devinfo_cache;
- static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
- {
- struct dmar_domain **domains;
- int idx = did >> 8;
- domains = iommu->domains[idx];
- if (!domains)
- return NULL;
- return domains[did & 0xff];
- }
- static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
- struct dmar_domain *domain)
- {
- struct dmar_domain **domains;
- int idx = did >> 8;
- if (!iommu->domains[idx]) {
- size_t size = 256 * sizeof(struct dmar_domain *);
- iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
- }
- domains = iommu->domains[idx];
- if (WARN_ON(!domains))
- return;
- domains[did & 0xff] = domain;
- }
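- /*
- * Note (illustrative): iommu->domains is a two-level table indexed
- * by domain id, so e.g. did 0x1234 lands in page iommu->domains[0x12]
- * at slot 0x34. The 256-entry second level is allocated lazily the
- * first time a did in that range is set.
- */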
- static inline void *alloc_pgtable_page(int node)
- {
- struct page *page;
- void *vaddr = NULL;
- page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
- if (page)
- vaddr = page_address(page);
- return vaddr;
- }
- static inline void free_pgtable_page(void *vaddr)
- {
- free_page((unsigned long)vaddr);
- }
- static inline void *alloc_domain_mem(void)
- {
- return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
- }
- static void free_domain_mem(void *vaddr)
- {
- kmem_cache_free(iommu_domain_cache, vaddr);
- }
- static inline void *alloc_devinfo_mem(void)
- {
- return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
- }
- static inline void free_devinfo_mem(void *vaddr)
- {
- kmem_cache_free(iommu_devinfo_cache, vaddr);
- }
- static inline int domain_type_is_vm(struct dmar_domain *domain)
- {
- return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
- }
- static inline int domain_type_is_si(struct dmar_domain *domain)
- {
- return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
- }
- static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
- {
- return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
- DOMAIN_FLAG_STATIC_IDENTITY);
- }
- static inline int domain_pfn_supported(struct dmar_domain *domain,
- unsigned long pfn)
- {
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
- return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
- }
- static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
- {
- unsigned long sagaw;
- int agaw = -1;
- sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(max_gaw);
- agaw >= 0; agaw--) {
- if (test_bit(agaw, &sagaw))
- break;
- }
- return agaw;
- }
- /*
- * Calculate max SAGAW for each iommu.
- */
- int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
- {
- return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
- }
- /*
- * Calculate the agaw for each iommu.
- * "SAGAW" may differ across iommus; use a default agaw, and fall back
- * to a smaller supported agaw for iommus that don't support the default.
- */
- int iommu_calculate_agaw(struct intel_iommu *iommu)
- {
- return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- }
- /* This function only returns a single iommu in a domain */
- static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
- {
- int iommu_id;
- /* si_domain and vm domain should not get here. */
- BUG_ON(domain_type_is_vm_or_si(domain));
- for_each_domain_iommu(iommu_id, domain)
- break;
- if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
- return NULL;
- return g_iommus[iommu_id];
- }
- static void domain_update_iommu_coherency(struct dmar_domain *domain)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool found = false;
- int i;
- domain->iommu_coherency = 1;
- for_each_domain_iommu(i, domain) {
- found = true;
- if (!ecap_coherent(g_iommus[i]->ecap)) {
- domain->iommu_coherency = 0;
- break;
- }
- }
- if (found)
- return;
- /* No hardware attached; use lowest common denominator */
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!ecap_coherent(iommu->ecap)) {
- domain->iommu_coherency = 0;
- break;
- }
- }
- rcu_read_unlock();
- }
- static int domain_update_iommu_snooping(struct intel_iommu *skip)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- int ret = 1;
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (iommu != skip) {
- if (!ecap_sc_support(iommu->ecap)) {
- ret = 0;
- break;
- }
- }
- }
- rcu_read_unlock();
- return ret;
- }
- static int domain_update_iommu_superpage(struct intel_iommu *skip)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- int mask = 0xf;
- if (!intel_iommu_superpage)
- return 0;
- /* set iommu_superpage to the smallest common denominator */
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (iommu != skip) {
- mask &= cap_super_page_val(iommu->cap);
- if (!mask)
- break;
- }
- }
- rcu_read_unlock();
- return fls(mask);
- }
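- /*
- * Worked example (illustrative): cap_super_page_val() is a bitmask
- * with bit 0 meaning 2MiB pages and bit 1 meaning 1GiB. If every
- * iommu reports 0x1, the loop leaves mask == 0x1 and fls() returns
- * 1, matching the iommu_superpage encoding (1 == 2MiB) documented in
- * struct dmar_domain.
- */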
- /* Some capabilities may be different across iommus */
- static void domain_update_iommu_cap(struct dmar_domain *domain)
- {
- domain_update_iommu_coherency(domain);
- domain->iommu_snooping = domain_update_iommu_snooping(NULL);
- domain->iommu_superpage = domain_update_iommu_superpage(NULL);
- }
- static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
- u8 bus, u8 devfn, int alloc)
- {
- struct root_entry *root = &iommu->root_entry[bus];
- struct context_entry *context;
- u64 *entry;
- entry = &root->lo;
- if (ecs_enabled(iommu)) {
- if (devfn >= 0x80) {
- devfn -= 0x80;
- entry = &root->hi;
- }
- devfn *= 2;
- }
- if (*entry & 1)
- context = phys_to_virt(*entry & VTD_PAGE_MASK);
- else {
- unsigned long phy_addr;
- if (!alloc)
- return NULL;
- context = alloc_pgtable_page(iommu->node);
- if (!context)
- return NULL;
- __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
- phy_addr = virt_to_phys((void *)context);
- *entry = phy_addr | 1;
- __iommu_flush_cache(iommu, entry, sizeof(*entry));
- }
- return &context[devfn];
- }
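- /*
- * Worked example (illustrative): with ECS enabled, each half of a
- * root entry covers 128 devfns. For devfn 0x85 the code above picks
- * root->hi, rebases the devfn to 0x05 and doubles it to 10; extended
- * context entries are twice the size of legacy ones, so the entry
- * occupies slots 10 and 11 of the context page.
- */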
- static int iommu_dummy(struct device *dev)
- {
- return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
- }
- static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
- {
- struct dmar_drhd_unit *drhd = NULL;
- struct intel_iommu *iommu;
- struct device *tmp;
- struct pci_dev *ptmp, *pdev = NULL;
- u16 segment = 0;
- int i;
- if (iommu_dummy(dev))
- return NULL;
- if (dev_is_pci(dev)) {
- struct pci_dev *pf_pdev;
- pdev = to_pci_dev(dev);
- /* VFs aren't listed in scope tables; we need to look up
- * the PF instead to find the IOMMU. */
- pf_pdev = pci_physfn(pdev);
- dev = &pf_pdev->dev;
- segment = pci_domain_nr(pdev->bus);
- } else if (has_acpi_companion(dev))
- dev = &ACPI_COMPANION(dev)->dev;
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (pdev && segment != drhd->segment)
- continue;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, tmp) {
- if (tmp == dev) {
- /* For a VF use its original BDF# not that of the PF
- * which we used for the IOMMU lookup. Strictly speaking
- * we could do this for all PCI devices; we only need to
- * get the BDF# from the scope table for ACPI matches. */
- if (pdev && pdev->is_virtfn)
- goto got_pdev;
- *bus = drhd->devices[i].bus;
- *devfn = drhd->devices[i].devfn;
- goto out;
- }
- if (!pdev || !dev_is_pci(tmp))
- continue;
- ptmp = to_pci_dev(tmp);
- if (ptmp->subordinate &&
- ptmp->subordinate->number <= pdev->bus->number &&
- ptmp->subordinate->busn_res.end >= pdev->bus->number)
- goto got_pdev;
- }
- if (pdev && drhd->include_all) {
- got_pdev:
- *bus = pdev->bus->number;
- *devfn = pdev->devfn;
- goto out;
- }
- }
- iommu = NULL;
- out:
- rcu_read_unlock();
- return iommu;
- }
- static void domain_flush_cache(struct dmar_domain *domain,
- void *addr, int size)
- {
- if (!domain->iommu_coherency)
- clflush_cache_range(addr, size);
- }
- static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
- {
- struct context_entry *context;
- int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&iommu->lock, flags);
- context = iommu_context_addr(iommu, bus, devfn, 0);
- if (context)
- ret = context_present(context);
- spin_unlock_irqrestore(&iommu->lock, flags);
- return ret;
- }
- static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
- {
- struct context_entry *context;
- unsigned long flags;
- spin_lock_irqsave(&iommu->lock, flags);
- context = iommu_context_addr(iommu, bus, devfn, 0);
- if (context) {
- context_clear_entry(context);
- __iommu_flush_cache(iommu, context, sizeof(*context));
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
- }
- static void free_context_table(struct intel_iommu *iommu)
- {
- int i;
- unsigned long flags;
- struct context_entry *context;
- spin_lock_irqsave(&iommu->lock, flags);
- if (!iommu->root_entry)
- goto out;
- for (i = 0; i < ROOT_ENTRY_NR; i++) {
- context = iommu_context_addr(iommu, i, 0, 0);
- if (context)
- free_pgtable_page(context);
- if (!ecs_enabled(iommu))
- continue;
- context = iommu_context_addr(iommu, i, 0x80, 0);
- if (context)
- free_pgtable_page(context);
- }
- free_pgtable_page(iommu->root_entry);
- iommu->root_entry = NULL;
- out:
- spin_unlock_irqrestore(&iommu->lock, flags);
- }
- static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn, int *target_level)
- {
- struct dma_pte *parent, *pte = NULL;
- int level = agaw_to_level(domain->agaw);
- int offset;
- BUG_ON(!domain->pgd);
- if (!domain_pfn_supported(domain, pfn))
- /* Address beyond IOMMU's addressing capabilities. */
- return NULL;
- parent = domain->pgd;
- while (1) {
- void *tmp_page;
- offset = pfn_level_offset(pfn, level);
- pte = &parent[offset];
- if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
- break;
- if (level == *target_level)
- break;
- if (!dma_pte_present(pte)) {
- uint64_t pteval;
- tmp_page = alloc_pgtable_page(domain->nid);
- if (!tmp_page)
- return NULL;
- domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
- if (cmpxchg64(&pte->val, 0ULL, pteval))
- /* Someone else set it while we were thinking; use theirs. */
- free_pgtable_page(tmp_page);
- else
- domain_flush_cache(domain, pte, sizeof(*pte));
- }
- if (level == 1)
- break;
- parent = phys_to_virt(dma_pte_addr(pte));
- level--;
- }
- if (!*target_level)
- *target_level = level;
- return pte;
- }
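- /*
- * Worked example (illustrative): for a 4-level table (agaw 2) with
- * *target_level == 1, pfn_to_dma_pte() walks levels 4 down to 1,
- * consuming 9 pfn bits per level via pfn_level_offset(), allocating
- * any missing intermediate page (the cmpxchg64() tolerates racing
- * callers), and returns the leaf pte mapping that 4KiB page.
- */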
- /* Return the pte for an address at a specific level */
- static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
- unsigned long pfn,
- int level, int *large_page)
- {
- struct dma_pte *parent, *pte = NULL;
- int total = agaw_to_level(domain->agaw);
- int offset;
- parent = domain->pgd;
- while (level <= total) {
- offset = pfn_level_offset(pfn, total);
- pte = &parent[offset];
- if (level == total)
- return pte;
- if (!dma_pte_present(pte)) {
- *large_page = total;
- break;
- }
- if (dma_pte_superpage(pte)) {
- *large_page = total;
- return pte;
- }
- parent = phys_to_virt(dma_pte_addr(pte));
- total--;
- }
- return NULL;
- }
- /* Clear last-level ptes; a TLB flush should follow */
- static void dma_pte_clear_range(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
- {
- unsigned int large_page = 1;
- struct dma_pte *first_pte, *pte;
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
- /* we don't need lock here; nobody else touches the iova range */
- do {
- large_page = 1;
- first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
- if (!pte) {
- start_pfn = align_to_level(start_pfn + 1, large_page + 1);
- continue;
- }
- do {
- dma_clear_pte(pte);
- start_pfn += lvl_to_nr_pages(large_page);
- pte++;
- } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
- } while (start_pfn && start_pfn <= last_pfn);
- }
- static void dma_pte_free_level(struct dmar_domain *domain, int level,
- struct dma_pte *pte, unsigned long pfn,
- unsigned long start_pfn, unsigned long last_pfn)
- {
- pfn = max(start_pfn, pfn);
- pte = &pte[pfn_level_offset(pfn, level)];
- do {
- unsigned long level_pfn;
- struct dma_pte *level_pte;
- if (!dma_pte_present(pte) || dma_pte_superpage(pte))
- goto next;
- level_pfn = pfn & level_mask(level);
- level_pte = phys_to_virt(dma_pte_addr(pte));
- if (level > 2)
- dma_pte_free_level(domain, level - 1, level_pte,
- level_pfn, start_pfn, last_pfn);
- /* If range covers entire pagetable, free it */
- if (!(start_pfn > level_pfn ||
- last_pfn < level_pfn + level_size(level) - 1)) {
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- free_pgtable_page(level_pte);
- }
- next:
- pfn += level_size(level);
- } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
- }
- /* clear last level (leaf) ptes and free page table pages. */
- static void dma_pte_free_pagetable(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
- {
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
- dma_pte_clear_range(domain, start_pfn, last_pfn);
- /* We don't need lock here; nobody else touches the iova range */
- dma_pte_free_level(domain, agaw_to_level(domain->agaw),
- domain->pgd, 0, start_pfn, last_pfn);
- /* free pgd */
- if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- free_pgtable_page(domain->pgd);
- domain->pgd = NULL;
- }
- }
- /* When a page at a given level is being unlinked from its parent, we don't
- need to *modify* it at all. All we need to do is make a list of all the
- pages which can be freed just as soon as we've flushed the IOTLB and we
- know the hardware page-walk will no longer touch them.
- The 'pte' argument is the *parent* PTE, pointing to the page that is to
- be freed. */
- static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *pte,
- struct page *freelist)
- {
- struct page *pg;
- pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
- pg->freelist = freelist;
- freelist = pg;
- if (level == 1)
- return freelist;
- pte = page_address(pg);
- do {
- if (dma_pte_present(pte) && !dma_pte_superpage(pte))
- freelist = dma_pte_list_pagetables(domain, level - 1,
- pte, freelist);
- pte++;
- } while (!first_pte_in_page(pte));
- return freelist;
- }
- static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
- struct dma_pte *pte, unsigned long pfn,
- unsigned long start_pfn,
- unsigned long last_pfn,
- struct page *freelist)
- {
- struct dma_pte *first_pte = NULL, *last_pte = NULL;
- pfn = max(start_pfn, pfn);
- pte = &pte[pfn_level_offset(pfn, level)];
- do {
- unsigned long level_pfn;
- if (!dma_pte_present(pte))
- goto next;
- level_pfn = pfn & level_mask(level);
- /* If range covers entire pagetable, free it */
- if (start_pfn <= level_pfn &&
- last_pfn >= level_pfn + level_size(level) - 1) {
- /* These subordinate page tables are going away entirely. Don't
- bother to clear them; we're just going to *free* them. */
- if (level > 1 && !dma_pte_superpage(pte))
- freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
- dma_clear_pte(pte);
- if (!first_pte)
- first_pte = pte;
- last_pte = pte;
- } else if (level > 1) {
- /* Recurse down into a level that isn't *entirely* obsolete */
- freelist = dma_pte_clear_level(domain, level - 1,
- phys_to_virt(dma_pte_addr(pte)),
- level_pfn, start_pfn, last_pfn,
- freelist);
- }
- next:
- pfn += level_size(level);
- } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
- if (first_pte)
- domain_flush_cache(domain, first_pte,
- (void *)++last_pte - (void *)first_pte);
- return freelist;
- }
- /* We can't just free the pages because the IOMMU may still be walking
- the page tables, and may have cached the intermediate levels. The
- pages can only be freed after the IOTLB flush has been done. */
- static struct page *domain_unmap(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
- {
- struct page *freelist = NULL;
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
- /* we don't need lock here; nobody else touches the iova range */
- freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
- domain->pgd, 0, start_pfn, last_pfn, NULL);
- /* free pgd */
- if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- struct page *pgd_page = virt_to_page(domain->pgd);
- pgd_page->freelist = freelist;
- freelist = pgd_page;
- domain->pgd = NULL;
- }
- return freelist;
- }
- static void dma_free_pagelist(struct page *freelist)
- {
- struct page *pg;
- while ((pg = freelist)) {
- freelist = pg->freelist;
- free_pgtable_page(page_address(pg));
- }
- }
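- /*
- * Usage note (illustrative): the expected lifecycle is
- *
- *	freelist = domain_unmap(domain, start_pfn, last_pfn);
- *	... flush the IOTLB for the unmapped range ...
- *	dma_free_pagelist(freelist);
- *
- * so no page-table page is reused while the hardware might still
- * walk it or hold it in cache.
- */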
- /* iommu handling */
- static int iommu_alloc_root_entry(struct intel_iommu *iommu)
- {
- struct root_entry *root;
- unsigned long flags;
- root = (struct root_entry *)alloc_pgtable_page(iommu->node);
- if (!root) {
- pr_err("Allocating root entry for %s failed\n",
- iommu->name);
- return -ENOMEM;
- }
- __iommu_flush_cache(iommu, root, ROOT_SIZE);
- spin_lock_irqsave(&iommu->lock, flags);
- iommu->root_entry = root;
- spin_unlock_irqrestore(&iommu->lock, flags);
- return 0;
- }
- static void iommu_set_root_entry(struct intel_iommu *iommu)
- {
- u64 addr;
- u32 sts;
- unsigned long flag;
- addr = virt_to_phys(iommu->root_entry);
- if (ecs_enabled(iommu))
- addr |= DMA_RTADDR_RTT;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
- writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- static void iommu_flush_write_buffer(struct intel_iommu *iommu)
- {
- u32 val;
- unsigned long flag;
- if (!rwbf_quirk && !cap_rwbf(iommu->cap))
- return;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- /* return value determines whether we need a write buffer flush */
- static void __iommu_flush_context(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask,
- u64 type)
- {
- u64 val = 0;
- unsigned long flag;
- switch (type) {
- case DMA_CCMD_GLOBAL_INVL:
- val = DMA_CCMD_GLOBAL_INVL;
- break;
- case DMA_CCMD_DOMAIN_INVL:
- val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
- break;
- case DMA_CCMD_DEVICE_INVL:
- val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
- | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
- break;
- default:
- BUG();
- }
- val |= DMA_CCMD_ICC;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
- dmar_readq, (!(val & DMA_CCMD_ICC)), val);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- /* return value determines whether we need a write buffer flush */
- static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type)
- {
- int tlb_offset = ecap_iotlb_offset(iommu->ecap);
- u64 val = 0, val_iva = 0;
- unsigned long flag;
- switch (type) {
- case DMA_TLB_GLOBAL_FLUSH:
- /* a global flush doesn't need to set IVA_REG */
- val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
- break;
- case DMA_TLB_DSI_FLUSH:
- val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
- break;
- case DMA_TLB_PSI_FLUSH:
- val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
- /* IH bit is passed in as part of address */
- val_iva = size_order | addr;
- break;
- default:
- BUG();
- }
- /* Note: set drain read/write */
- #if 0
- /*
- * This is probably meant to be extra safe; it looks like we
- * can ignore it without any impact.
- */
- if (cap_read_drain(iommu->cap))
- val |= DMA_TLB_READ_DRAIN;
- #endif
- if (cap_write_drain(iommu->cap))
- val |= DMA_TLB_WRITE_DRAIN;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- /* Note: Only uses first TLB reg currently */
- if (val_iva)
- dmar_writeq(iommu->reg + tlb_offset, val_iva);
- dmar_writeq(iommu->reg + tlb_offset + 8, val);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, tlb_offset + 8,
- dmar_readq, (!(val & DMA_TLB_IVT)), val);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- /* check IOTLB invalidation granularity */
- if (DMA_TLB_IAIG(val) == 0)
- pr_err("Flush IOTLB failed\n");
- if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
- pr_debug("TLB flush request %Lx, actual %Lx\n",
- (unsigned long long)DMA_TLB_IIRG(type),
- (unsigned long long)DMA_TLB_IAIG(val));
- }
- static struct device_domain_info *
- iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
- {
- struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
- if (!iommu->qi)
- return NULL;
- list_for_each_entry(info, &domain->devices, link)
- if (info->iommu == iommu && info->bus == bus &&
- info->devfn == devfn) {
- if (info->ats_supported && info->dev)
- return info;
- break;
- }
- return NULL;
- }
- static void domain_update_iotlb(struct dmar_domain *domain)
- {
- struct device_domain_info *info;
- bool has_iotlb_device = false;
- assert_spin_locked(&device_domain_lock);
- list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
- pdev = to_pci_dev(info->dev);
- if (pdev->ats_enabled) {
- has_iotlb_device = true;
- break;
- }
- }
- domain->has_iotlb_device = has_iotlb_device;
- }
- static void iommu_enable_dev_iotlb(struct device_domain_info *info)
- {
- struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
- if (!info || !dev_is_pci(info->dev))
- return;
- pdev = to_pci_dev(info->dev);
- /* For an IOMMU that supports device IOTLB throttling (DIT), we assign
- * the PFSID to the invalidation descriptor of a VF such that IOMMU HW
- * can gauge queue depth at the PF level. If DIT is not supported, PFSID
- * is treated as reserved and should be set to 0.
- */
- if (!ecap_dit(info->iommu->ecap))
- info->pfsid = 0;
- else {
- struct pci_dev *pf_pdev;
- /* pci_physfn() returns pdev itself if the device is not a VF */
- pf_pdev = pci_physfn(pdev);
- info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
- }
- #ifdef CONFIG_INTEL_IOMMU_SVM
- /* The PCIe spec, in its wisdom, declares that the behaviour of
- the device if you enable PASID support after ATS support is
- undefined. So always enable PASID support on devices which
- have it, even if we can't yet know if we're ever going to
- use it. */
- if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
- info->pasid_enabled = 1;
- if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
- info->pri_enabled = 1;
- #endif
- if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
- info->ats_enabled = 1;
- domain_update_iotlb(info->domain);
- info->ats_qdep = pci_ats_queue_depth(pdev);
- }
- }
- static void iommu_disable_dev_iotlb(struct device_domain_info *info)
- {
- struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
- if (!dev_is_pci(info->dev))
- return;
- pdev = to_pci_dev(info->dev);
- if (info->ats_enabled) {
- pci_disable_ats(pdev);
- info->ats_enabled = 0;
- domain_update_iotlb(info->domain);
- }
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (info->pri_enabled) {
- pci_disable_pri(pdev);
- info->pri_enabled = 0;
- }
- if (info->pasid_enabled) {
- pci_disable_pasid(pdev);
- info->pasid_enabled = 0;
- }
- #endif
- }
- static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
- u64 addr, unsigned mask)
- {
- u16 sid, qdep;
- unsigned long flags;
- struct device_domain_info *info;
- if (!domain->has_iotlb_device)
- return;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
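- /* The PCI requester ID is the bus number in bits 15:8, devfn in bits 7:0 */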
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- }
- static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
- struct dmar_domain *domain,
- unsigned long pfn, unsigned int pages,
- int ih, int map)
- {
- unsigned int mask = ilog2(__roundup_pow_of_two(pages));
- uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- u16 did = domain->iommu_did[iommu->seq_id];
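- /* e.g. pages == 5 rounds up to 8 == 1 << 3, so mask == 3: an 8-page, naturally aligned flush */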
- BUG_ON(pages == 0);
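- /* IH occupies bit 6 of the IVA register; it hints that non-leaf paging-structure caches need not be invalidated */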
- if (ih)
- ih = 1 << 6;
- /*
- * Fall back to domain-selective flush if there is no PSI support or
- * the size is too big.
- * PSI requires the flush size to be 2 ^ x, with the base address
- * naturally aligned to that size.
- */
- if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- /*
- * In caching mode, changes of pages from non-present to present require
- * a flush. However, the device IOTLB doesn't need to be flushed in this case.
- */
- if (!cap_caching_mode(iommu->cap) || !map)
- iommu_flush_dev_iotlb(domain, addr, mask);
- }
- static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
- {
- u32 pmen;
- unsigned long flags;
- if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
- return;
- raw_spin_lock_irqsave(&iommu->register_lock, flags);
- pmen = readl(iommu->reg + DMAR_PMEN_REG);
- pmen &= ~DMA_PMEN_EPM;
- writel(pmen, iommu->reg + DMAR_PMEN_REG);
- /* wait for the protected region status bit to clear */
- IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
- readl, !(pmen & DMA_PMEN_PRS), pmen);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
- }
- static void iommu_enable_translation(struct intel_iommu *iommu)
- {
- u32 sts;
- unsigned long flags;
- raw_spin_lock_irqsave(&iommu->register_lock, flags);
- iommu->gcmd |= DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
- }
- static void iommu_disable_translation(struct intel_iommu *iommu)
- {
- u32 sts;
- unsigned long flag;
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- iommu->gcmd &= ~DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
- /* Make sure hardware completes it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- static int iommu_init_domains(struct intel_iommu *iommu)
- {
- u32 ndomains, nlongs;
- size_t size;
- ndomains = cap_ndoms(iommu->cap);
- pr_debug("%s: Number of Domains supported <%d>\n",
- iommu->name, ndomains);
- nlongs = BITS_TO_LONGS(ndomains);
- spin_lock_init(&iommu->lock);
- iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
- if (!iommu->domain_ids) {
- pr_err("%s: Allocating domain id array failed\n",
- iommu->name);
- return -ENOMEM;
- }
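- /* iommu->domains is a two-level array: each second-level block holds 256 domain pointers, so allocate ALIGN(ndomains, 256) >> 8 first-level slots */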
- size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
- iommu->domains = kzalloc(size, GFP_KERNEL);
- if (iommu->domains) {
- size = 256 * sizeof(struct dmar_domain *);
- iommu->domains[0] = kzalloc(size, GFP_KERNEL);
- }
- if (!iommu->domains || !iommu->domains[0]) {
- pr_err("%s: Allocating domain array failed\n",
- iommu->name);
- kfree(iommu->domain_ids);
- kfree(iommu->domains);
- iommu->domain_ids = NULL;
- iommu->domains = NULL;
- return -ENOMEM;
- }
- /*
- * If Caching mode is set, then invalid translations are tagged
- * with domain-id 0, hence we need to pre-allocate it. We also
- * use domain-id 0 as a marker for non-allocated domain-id, so
- * make sure it is not used for a real domain.
- */
- set_bit(0, iommu->domain_ids);
- return 0;
- }
- static void disable_dmar_iommu(struct intel_iommu *iommu)
- {
- struct device_domain_info *info, *tmp;
- unsigned long flags;
- if (!iommu->domains || !iommu->domain_ids)
- return;
- again:
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
- struct dmar_domain *domain;
- if (info->iommu != iommu)
- continue;
- if (!info->dev || !info->domain)
- continue;
- domain = info->domain;
- __dmar_remove_one_dev_info(info);
- if (!domain_type_is_vm_or_si(domain)) {
- /*
- * The domain_exit() function can't be called under
- * device_domain_lock, as it takes this lock itself.
- * So release the lock here and re-run the loop
- * afterwards.
- */
- spin_unlock_irqrestore(&device_domain_lock, flags);
- domain_exit(domain);
- goto again;
- }
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- if (iommu->gcmd & DMA_GCMD_TE)
- iommu_disable_translation(iommu);
- }
- static void free_dmar_iommu(struct intel_iommu *iommu)
- {
- if ((iommu->domains) && (iommu->domain_ids)) {
- int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
- int i;
- for (i = 0; i < elems; i++)
- kfree(iommu->domains[i]);
- kfree(iommu->domains);
- kfree(iommu->domain_ids);
- iommu->domains = NULL;
- iommu->domain_ids = NULL;
- }
- g_iommus[iommu->seq_id] = NULL;
- /* free context mapping */
- free_context_table(iommu);
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu)) {
- if (ecap_prs(iommu->ecap))
- intel_svm_finish_prq(iommu);
- intel_svm_free_pasid_tables(iommu);
- }
- #endif
- }
- static struct dmar_domain *alloc_domain(int flags)
- {
- struct dmar_domain *domain;
- domain = alloc_domain_mem();
- if (!domain)
- return NULL;
- memset(domain, 0, sizeof(*domain));
- domain->nid = -1;
- domain->flags = flags;
- domain->has_iotlb_device = false;
- INIT_LIST_HEAD(&domain->devices);
- return domain;
- }
- /* Must be called with device_domain_lock and iommu->lock held */
- static int domain_attach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu)
- {
- unsigned long ndomains;
- int num;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
- domain->iommu_refcnt[iommu->seq_id] += 1;
- domain->iommu_count += 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 1) {
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
- pr_err("%s: No free domain ids\n", iommu->name);
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- domain->iommu_count -= 1;
- return -ENOSPC;
- }
- set_bit(num, iommu->domain_ids);
- set_iommu_domain(iommu, num, domain);
- domain->iommu_did[iommu->seq_id] = num;
- domain->nid = iommu->node;
- domain_update_iommu_cap(domain);
- }
- return 0;
- }
- static int domain_detach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu)
- {
- int num, count = INT_MAX;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- count = --domain->iommu_count;
- if (domain->iommu_refcnt[iommu->seq_id] == 0) {
- num = domain->iommu_did[iommu->seq_id];
- clear_bit(num, iommu->domain_ids);
- set_iommu_domain(iommu, num, NULL);
- domain_update_iommu_cap(domain);
- domain->iommu_did[iommu->seq_id] = 0;
- }
- return count;
- }
- static struct iova_domain reserved_iova_list;
- static struct lock_class_key reserved_rbtree_key;
- static int dmar_init_reserved_ranges(void)
- {
- struct pci_dev *pdev = NULL;
- struct iova *iova;
- int i;
- init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
- lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
- &reserved_rbtree_key);
- /* IOAPIC ranges shouldn't be accessed by DMA */
- iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
- IOVA_PFN(IOAPIC_RANGE_END));
- if (!iova) {
- pr_err("Reserve IOAPIC range failed\n");
- return -ENODEV;
- }
- /* Reserve all PCI MMIO to avoid peer-to-peer access */
- for_each_pci_dev(pdev) {
- struct resource *r;
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- r = &pdev->resource[i];
- if (!r->flags || !(r->flags & IORESOURCE_MEM))
- continue;
- iova = reserve_iova(&reserved_iova_list,
- IOVA_PFN(r->start),
- IOVA_PFN(r->end));
- if (!iova) {
- pr_err("Reserve iova failed\n");
- return -ENODEV;
- }
- }
- }
- return 0;
- }
- static void domain_reserve_special_ranges(struct dmar_domain *domain)
- {
- copy_reserved_iova(&reserved_iova_list, &domain->iovad);
- }
- static inline int guestwidth_to_adjustwidth(int gaw)
- {
- int agaw;
- int r = (gaw - 12) % 9;
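- /* Each page-table level resolves 9 bits above the 12-bit page offset, so round gaw up to the next 12 + 9*k; e.g. gaw == 40 yields agaw == 48 */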
- if (r == 0)
- agaw = gaw;
- else
- agaw = gaw + 9 - r;
- if (agaw > 64)
- agaw = 64;
- return agaw;
- }
- static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
- int guest_width)
- {
- int adjust_width, agaw;
- unsigned long sagaw;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
- domain_reserve_special_ranges(domain);
- /* calculate AGAW */
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- agaw = width_to_agaw(adjust_width);
- sagaw = cap_sagaw(iommu->cap);
- if (!test_bit(agaw, &sagaw)) {
- /* hardware doesn't support it, choose a bigger one */
- pr_debug("Hardware doesn't support agaw %d\n", agaw);
- agaw = find_next_bit(&sagaw, 5, agaw);
- if (agaw >= 5)
- return -ENODEV;
- }
- domain->agaw = agaw;
- if (ecap_coherent(iommu->ecap))
- domain->iommu_coherency = 1;
- else
- domain->iommu_coherency = 0;
- if (ecap_sc_support(iommu->ecap))
- domain->iommu_snooping = 1;
- else
- domain->iommu_snooping = 0;
- if (intel_iommu_superpage)
- domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
- else
- domain->iommu_superpage = 0;
- domain->nid = iommu->node;
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
- return 0;
- }
- static void domain_exit(struct dmar_domain *domain)
- {
- struct page *freelist = NULL;
- /* Domain 0 is reserved, so don't process it */
- if (!domain)
- return;
- /* Flush any lazy unmaps that may reference this domain */
- if (!intel_iommu_strict) {
- int cpu;
- for_each_possible_cpu(cpu)
- flush_unmaps_timeout(cpu);
- }
- /* Remove associated devices and clear attached or cached domains */
- rcu_read_lock();
- domain_remove_dev_info(domain);
- rcu_read_unlock();
- /* destroy iovas */
- put_iova_domain(&domain->iovad);
- freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
- dma_free_pagelist(freelist);
- free_domain_mem(domain);
- }
- static int domain_context_mapping_one(struct dmar_domain *domain,
- struct intel_iommu *iommu,
- u8 bus, u8 devfn)
- {
- u16 did = domain->iommu_did[iommu->seq_id];
- int translation = CONTEXT_TT_MULTI_LEVEL;
- struct device_domain_info *info = NULL;
- struct context_entry *context;
- unsigned long flags;
- struct dma_pte *pgd;
- int ret, agaw;
- WARN_ON(did == 0);
- if (hw_pass_through && domain_type_is_si(domain))
- translation = CONTEXT_TT_PASS_THROUGH;
- pr_debug("Set context mapping for %02x:%02x.%d\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- BUG_ON(!domain->pgd);
- spin_lock_irqsave(&device_domain_lock, flags);
- spin_lock(&iommu->lock);
- ret = -ENOMEM;
- context = iommu_context_addr(iommu, bus, devfn, 1);
- if (!context)
- goto out_unlock;
- ret = 0;
- if (context_present(context))
- goto out_unlock;
- /*
- * For kdump cases, old valid entries may be cached due to the
- * in-flight DMA and copied pgtable, but there is no unmapping
- * behaviour for them, so we need an explicit cache flush for
- * the newly-mapped device. For kdump, at this point, the device
- * is supposed to have finished reset at its driver probe stage,
- * so no in-flight DMA will exist, and we don't need to worry
- * about it hereafter.
- */
- if (context_copied(context)) {
- u16 did_old = context_domain_id(context);
- if (did_old < cap_ndoms(iommu->cap)) {
- iommu->flush.flush_context(iommu, did_old,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
- DMA_TLB_DSI_FLUSH);
- }
- }
- pgd = domain->pgd;
- context_clear_entry(context);
- context_set_domain_id(context, did);
- /*
- * Skip top levels of page tables for an IOMMU whose agaw is smaller
- * than the domain's default. Unnecessary for PT mode.
- */
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
- ret = -ENOMEM;
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd))
- goto out_unlock;
- }
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
- if (info && info->ats_supported)
- translation = CONTEXT_TT_DEV_IOTLB;
- else
- translation = CONTEXT_TT_MULTI_LEVEL;
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, agaw);
- } else {
- /*
- * In pass-through mode, AW must be programmed to
- * indicate the largest AGAW value supported by
- * hardware, and ASR is ignored by hardware.
- */
- context_set_address_width(context, iommu->msagaw);
- }
- context_set_translation_type(context, translation);
- context_set_fault_enable(context);
- context_set_present(context);
- domain_flush_cache(domain, context, sizeof(*context));
- /*
- * It's a non-present to present mapping. If the hardware doesn't cache
- * non-present entries, we only need to flush the write buffer. If it
- * _does_ cache non-present entries, then it does so in the special
- * domain #0, which we have to flush:
- */
- if (cap_caching_mode(iommu->cap)) {
- iommu->flush.flush_context(iommu, 0,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- } else {
- iommu_flush_write_buffer(iommu);
- }
- iommu_enable_dev_iotlb(info);
- ret = 0;
- out_unlock:
- spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return ret;
- }
- struct domain_context_mapping_data {
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- };
- static int domain_context_mapping_cb(struct pci_dev *pdev,
- u16 alias, void *opaque)
- {
- struct domain_context_mapping_data *data = opaque;
- return domain_context_mapping_one(data->domain, data->iommu,
- PCI_BUS_NUM(alias), alias & 0xff);
- }
- static int
- domain_context_mapping(struct dmar_domain *domain, struct device *dev)
- {
- struct intel_iommu *iommu;
- u8 bus, devfn;
- struct domain_context_mapping_data data;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- if (!dev_is_pci(dev))
- return domain_context_mapping_one(domain, iommu, bus, devfn);
- data.domain = domain;
- data.iommu = iommu;
- return pci_for_each_dma_alias(to_pci_dev(dev),
- &domain_context_mapping_cb, &data);
- }
- static int domain_context_mapped_cb(struct pci_dev *pdev,
- u16 alias, void *opaque)
- {
- struct intel_iommu *iommu = opaque;
- return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
- }
- static int domain_context_mapped(struct device *dev)
- {
- struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- if (!dev_is_pci(dev))
- return device_context_mapped(iommu, bus, devfn);
- return !pci_for_each_dma_alias(to_pci_dev(dev),
- domain_context_mapped_cb, iommu);
- }
- /* Returns a number of VTD pages, but aligned to MM page size */
- static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
- {
- host_addr &= ~PAGE_MASK;
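- /* e.g. with 4KiB MM pages, offset 0x800 and size 0x1000 align up to 0x2000, i.e. two VT-d pages */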
- return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
- }
- /* Return largest possible superpage level for a given mapping */
- static inline int hardware_largepage_caps(struct dmar_domain *domain,
- unsigned long iov_pfn,
- unsigned long phy_pfn,
- unsigned long pages)
- {
- int support, level = 1;
- unsigned long pfnmerge;
- support = domain->iommu_superpage;
- /* To use a large page, the virtual *and* physical addresses
- must be aligned to 2MiB/1GiB/etc. Lower bits set in either
- of them will mean we have to use smaller pages. So just
- merge them and check both at once. */
- pfnmerge = iov_pfn | phy_pfn;
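- /* e.g. if both pfns have their low 9 bits clear (2MiB aligned) and pages >= 512, this settles on level 2: a 2MiB superpage */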
- while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
- pages >>= VTD_STRIDE_SHIFT;
- if (!pages)
- break;
- pfnmerge >>= VTD_STRIDE_SHIFT;
- level++;
- support--;
- }
- return level;
- }
- static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
- {
- struct dma_pte *first_pte = NULL, *pte = NULL;
- phys_addr_t uninitialized_var(pteval);
- unsigned long sg_res = 0;
- unsigned int largepage_lvl = 0;
- unsigned long lvl_pages = 0;
- BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
- if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
- return -EINVAL;
- prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
- if (!sg) {
- sg_res = nr_pages;
- pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
- }
- while (nr_pages > 0) {
- uint64_t tmp;
- if (!sg_res) {
- unsigned int pgoff = sg->offset & ~PAGE_MASK;
- sg_res = aligned_nrpages(sg->offset, sg->length);
- sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
- sg->dma_length = sg->length;
- pteval = (sg_phys(sg) - pgoff) | prot;
- phys_pfn = pteval >> VTD_PAGE_SHIFT;
- }
- if (!pte) {
- largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
- if (!pte)
- return -ENOMEM;
- /* It is a large page */
- if (largepage_lvl > 1) {
- unsigned long nr_superpages, end_pfn;
- pteval |= DMA_PTE_LARGE_PAGE;
- lvl_pages = lvl_to_nr_pages(largepage_lvl);
- nr_superpages = sg_res / lvl_pages;
- end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
- /*
- * Ensure that old small page tables are
- * removed to make room for superpage(s).
- */
- dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
- } else {
- pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
- }
- }
- /* We don't need lock here, nobody else
- * touches the iova range
- */
- tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
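- /* cmpxchg64_local() installs the PTE only if it was clear; a non-zero return means the slot was already mapped */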
- if (tmp) {
- static int dumps = 5;
- pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
- iov_pfn, tmp, (unsigned long long)pteval);
- if (dumps) {
- dumps--;
- debug_dma_dump_mappings(NULL);
- }
- WARN_ON(1);
- }
- lvl_pages = lvl_to_nr_pages(largepage_lvl);
- BUG_ON(nr_pages < lvl_pages);
- BUG_ON(sg_res < lvl_pages);
- nr_pages -= lvl_pages;
- iov_pfn += lvl_pages;
- phys_pfn += lvl_pages;
- pteval += lvl_pages * VTD_PAGE_SIZE;
- sg_res -= lvl_pages;
- /* If the next PTE would be the first in a new page, then we
- need to flush the cache on the entries we've just written.
- And then we'll need to recalculate 'pte', so clear it and
- let it get set again in the if (!pte) block above.
- If we're done (!nr_pages) we need to flush the cache too.
- Also if we've been setting superpages, we may need to
- recalculate 'pte' and switch back to smaller pages for the
- end of the mapping, if the trailing size is not enough to
- use another superpage (i.e. sg_res < lvl_pages). */
- pte++;
- if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && sg_res < lvl_pages)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
- pte = NULL;
- }
- if (!sg_res && nr_pages)
- sg = sg_next(sg);
- }
- return 0;
- }
- static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long nr_pages,
- int prot)
- {
- return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
- }
- static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages,
- int prot)
- {
- return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
- }
- static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
- {
- if (!iommu)
- return;
- clear_context_table(iommu, bus, devfn);
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- }
- static inline void unlink_domain_info(struct device_domain_info *info)
- {
- assert_spin_locked(&device_domain_lock);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->archdata.iommu = NULL;
- }
- static void domain_remove_dev_info(struct dmar_domain *domain)
- {
- struct device_domain_info *info, *tmp;
- unsigned long flags;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- }
- /*
- * find_domain
- * Note: struct device->archdata.iommu stores the device_domain_info
- */
- static struct dmar_domain *find_domain(struct device *dev)
- {
- struct device_domain_info *info;
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
- if (info)
- return info->domain;
- return NULL;
- }
- static inline struct device_domain_info *
- dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
- {
- struct device_domain_info *info;
- list_for_each_entry(info, &device_domain_list, global)
- if (info->iommu->segment == segment && info->bus == bus &&
- info->devfn == devfn)
- return info;
- return NULL;
- }
- static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
- int bus, int devfn,
- struct device *dev,
- struct dmar_domain *domain)
- {
- struct dmar_domain *found = NULL;
- struct device_domain_info *info;
- unsigned long flags;
- int ret;
- info = alloc_devinfo_mem();
- if (!info)
- return NULL;
- info->bus = bus;
- info->devfn = devfn;
- info->ats_supported = info->pasid_supported = info->pri_supported = 0;
- info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
- info->ats_qdep = 0;
- info->dev = dev;
- info->domain = domain;
- info->iommu = iommu;
- if (dev && dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(info->dev);
- if (ecap_dev_iotlb_support(iommu->ecap) &&
- pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
- dmar_find_matched_atsr_unit(pdev))
- info->ats_supported = 1;
- if (ecs_enabled(iommu)) {
- if (pasid_enabled(iommu)) {
- int features = pci_pasid_features(pdev);
- if (features >= 0)
- info->pasid_supported = features | 1;
- }
- if (info->ats_supported && ecap_prs(iommu->ecap) &&
- pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
- info->pri_supported = 1;
- }
- }
- spin_lock_irqsave(&device_domain_lock, flags);
- if (dev)
- found = find_domain(dev);
- if (!found) {
- struct device_domain_info *info2;
- info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
- if (info2) {
- found = info2->domain;
- info2->dev = dev;
- }
- }
- if (found) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
- /* Caller must free the original domain */
- return found;
- }
- spin_lock(&iommu->lock);
- ret = domain_attach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
- if (ret) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
- return NULL;
- }
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- if (dev)
- dev->archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
- if (dev && domain_context_mapping(domain, dev)) {
- pr_err("Domain context map for %s failed\n", dev_name(dev));
- dmar_remove_one_dev_info(domain, dev);
- return NULL;
- }
- return domain;
- }
- static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
- {
- *(u16 *)opaque = alias;
- return 0;
- }
- static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
- {
- struct device_domain_info *info = NULL;
- struct dmar_domain *domain = NULL;
- struct intel_iommu *iommu;
- u16 req_id, dma_alias;
- unsigned long flags;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
- req_id = ((u16)bus << 8) | devfn;
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
- PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff);
- if (info) {
- iommu = info->iommu;
- domain = info->domain;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- /* DMA alias already has a domain, use it */
- if (info)
- goto out;
- }
- /* Allocate and initialize new domain for the device */
- domain = alloc_domain(0);
- if (!domain)
- return NULL;
- if (domain_init(domain, iommu, gaw)) {
- domain_exit(domain);
- return NULL;
- }
- out:
- return domain;
- }
- static struct dmar_domain *set_domain_for_dev(struct device *dev,
- struct dmar_domain *domain)
- {
- struct intel_iommu *iommu;
- struct dmar_domain *tmp;
- u16 req_id, dma_alias;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
- req_id = ((u16)bus << 8) | devfn;
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
- /* register PCI DMA alias device */
- if (req_id != dma_alias) {
- tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff, NULL, domain);
- if (!tmp || tmp != domain)
- return tmp;
- }
- }
- tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
- if (!tmp || tmp != domain)
- return tmp;
- return domain;
- }
- static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
- {
- struct dmar_domain *domain, *tmp;
- domain = find_domain(dev);
- if (domain)
- goto out;
- domain = find_or_alloc_domain(dev, gaw);
- if (!domain)
- goto out;
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
- out:
- return domain;
- }
- static int iommu_domain_identity_map(struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
- {
- unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
- unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
- if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
- dma_to_mm_pfn(last_vpfn))) {
- pr_err("Reserving iova failed\n");
- return -ENOMEM;
- }
- pr_debug("Mapping reserved region %llx-%llx\n", start, end);
- /*
- * The RMRR range might overlap with the physical memory range;
- * clear it first
- */
- dma_pte_clear_range(domain, first_vpfn, last_vpfn);
- return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
- last_vpfn - first_vpfn + 1,
- DMA_PTE_READ|DMA_PTE_WRITE);
- }
- static int domain_prepare_identity_map(struct device *dev,
- struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
- {
- /* For _hardware_ passthrough, don't bother. But for software
- passthrough, we do it anyway -- it may indicate a memory
- range which is reserved in E820 and so didn't get set
- up to start with in si_domain */
- if (domain == si_domain && hw_pass_through) {
- pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
- return 0;
- }
- pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
- if (end < start) {
- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
- if (end >> agaw_to_width(domain->agaw)) {
- WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- agaw_to_width(domain->agaw),
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
- return iommu_domain_identity_map(domain, start, end);
- }
- static int iommu_prepare_identity_map(struct device *dev,
- unsigned long long start,
- unsigned long long end)
- {
- struct dmar_domain *domain;
- int ret;
- domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- return -ENOMEM;
- ret = domain_prepare_identity_map(dev, domain, start, end);
- if (ret)
- domain_exit(domain);
- return ret;
- }
- static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
- struct device *dev)
- {
- if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
- return 0;
- return iommu_prepare_identity_map(dev, rmrr->base_address,
- rmrr->end_address);
- }
- #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
- static inline void iommu_prepare_isa(void)
- {
- struct pci_dev *pdev;
- int ret;
- pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- if (!pdev)
- return;
- pr_info("Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
- if (ret)
- pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
- pci_dev_put(pdev);
- }
- #else
- static inline void iommu_prepare_isa(void)
- {
- return;
- }
- #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
- static int md_domain_init(struct dmar_domain *domain, int guest_width);
- static int __init si_domain_init(int hw)
- {
- int nid, ret = 0;
- si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
- if (!si_domain)
- return -EFAULT;
- if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- domain_exit(si_domain);
- return -EFAULT;
- }
- pr_debug("Identity mapping domain allocated\n");
- if (hw)
- return 0;
- for_each_online_node(nid) {
- unsigned long start_pfn, end_pfn;
- int i;
- for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
- ret = iommu_domain_identity_map(si_domain,
- PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
- if (ret)
- return ret;
- }
- }
- return 0;
- }
- static int identity_mapping(struct device *dev)
- {
- struct device_domain_info *info;
- if (likely(!iommu_identity_mapping))
- return 0;
- info = dev->archdata.iommu;
- if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
- return (info->domain == si_domain);
- return 0;
- }
- static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
- {
- struct dmar_domain *ndomain;
- struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
- if (ndomain != domain)
- return -EBUSY;
- return 0;
- }
- static bool device_has_rmrr(struct device *dev)
- {
- struct dmar_rmrr_unit *rmrr;
- struct device *tmp;
- int i;
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- /*
- * Return TRUE if this RMRR contains the device that
- * is passed in.
- */
- for_each_active_dev_scope(rmrr->devices,
- rmrr->devices_cnt, i, tmp)
- if (tmp == dev) {
- rcu_read_unlock();
- return true;
- }
- }
- rcu_read_unlock();
- return false;
- }
- /*
- * There are a couple of cases where we need to restrict the functionality of
- * devices associated with RMRRs. The first is when evaluating a device for
- * identity mapping because problems exist when devices are moved in and out
- * of domains and their respective RMRR information is lost. This means that
- * a device with associated RMRRs will never be in a "passthrough" domain.
- * The second is use of the device through the IOMMU API. This interface
- * expects to have full control of the IOVA space for the device. We cannot
- * satisfy both the requirement that RMRR access is maintained and have an
- * unencumbered IOVA space. We also have no ability to quiesce the device's
- * use of the RMRR space or even inform the IOMMU API user of the restriction.
- * We therefore prevent devices associated with an RMRR from participating in
- * the IOMMU API, which eliminates them from device assignment.
- *
- * In both cases we assume that PCI USB devices with RMRRs have them largely
- * for historical reasons and that the RMRR space is not actively used post
- * boot. This exclusion may change if vendors begin to abuse it.
- *
- * The same exception is made for graphics devices, with the requirement that
- * any use of the RMRR regions will be torn down before assigning the device
- * to a guest.
- */
- static bool device_is_rmrr_locked(struct device *dev)
- {
- if (!device_has_rmrr(dev))
- return false;
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
- return false;
- }
- return true;
- }
- static int iommu_should_identity_map(struct device *dev, int startup)
- {
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- if (device_is_rmrr_locked(dev))
- return 0;
- if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
- return 1;
- if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return 1;
- if (!(iommu_identity_mapping & IDENTMAP_ALL))
- return 0;
- /*
- * We want to start off with all devices in the 1:1 domain, and
- * take them out later if we find they can't access all of memory.
- *
- * However, we can't do this for PCI devices behind bridges,
- * because all PCI devices behind the same bridge will end up
- * with the same source-id on their transactions.
- *
- * Practically speaking, we can't change things around for these
- * devices at run-time, because we can't be sure there'll be no
- * DMA transactions in flight for any of their siblings.
- *
- * So PCI devices (unless they're on the root bus) as well as
- * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
- * the 1:1 domain, just in _case_ one of their siblings turns out
- * not to be able to map all of memory.
- */
- if (!pci_is_pcie(pdev)) {
- if (!pci_is_root_bus(pdev->bus))
- return 0;
- if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
- return 0;
- } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
- } else {
- if (device_has_rmrr(dev))
- return 0;
- }
- /*
- * At boot time, we don't yet know if devices will be 64-bit capable.
- * Assume that they will be; if they turn out not to be, then we can
- * take them out of the 1:1 domain later.
- */
- if (!startup) {
- /*
- * If the device's dma_mask is less than the system's memory
- * size then this is not a candidate for identity mapping.
- */
- u64 dma_mask = *dev->dma_mask;
- if (dev->coherent_dma_mask &&
- dev->coherent_dma_mask < dma_mask)
- dma_mask = dev->coherent_dma_mask;
- return dma_mask >= dma_get_required_mask(dev);
- }
- return 1;
- }
- static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
- {
- int ret;
- if (!iommu_should_identity_map(dev, 1))
- return 0;
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret)
- pr_info("%s identity mapping for device %s\n",
- hw ? "Hardware" : "Software", dev_name(dev));
- else if (ret == -ENODEV)
- /* device not associated with an iommu */
- ret = 0;
- return ret;
- }
- static int __init iommu_prepare_static_identity_mapping(int hw)
- {
- struct pci_dev *pdev = NULL;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- struct device *dev;
- int i;
- int ret = 0;
- for_each_pci_dev(pdev) {
- ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
- if (ret)
- return ret;
- }
- for_each_active_iommu(iommu, drhd)
- for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
- struct acpi_device_physical_node *pn;
- struct acpi_device *adev;
- if (dev->bus != &acpi_bus_type)
- continue;
- adev = to_acpi_device(dev);
- mutex_lock(&adev->physical_node_lock);
- list_for_each_entry(pn, &adev->physical_node_list, node) {
- ret = dev_prepare_static_identity_mapping(pn->dev, hw);
- if (ret)
- break;
- }
- mutex_unlock(&adev->physical_node_lock);
- if (ret)
- return ret;
- }
- return 0;
- }
- static void intel_iommu_init_qi(struct intel_iommu *iommu)
- {
- /*
- * Start from a sane IOMMU hardware state.
- * If queued invalidation was already initialized by us
- * (for example, while enabling interrupt remapping), then
- * things are already rolling from a sane state.
- */
- if (!iommu->qi) {
- /*
- * Clear any previous faults.
- */
- dmar_fault(-1, iommu);
- /*
- * Disable queued invalidation if supported and already enabled
- * before OS handover.
- */
- dmar_disable_qi(iommu);
- }
- if (dmar_enable_qi(iommu)) {
- /*
- * Queued invalidation is not enabled; use register-based invalidation
- */
- iommu->flush.flush_context = __iommu_flush_context;
- iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- pr_info("%s: Using Register based invalidation\n",
- iommu->name);
- } else {
- iommu->flush.flush_context = qi_flush_context;
- iommu->flush.flush_iotlb = qi_flush_iotlb;
- pr_info("%s: Using Queued invalidation\n", iommu->name);
- }
- }
- static int copy_context_table(struct intel_iommu *iommu,
- struct root_entry *old_re,
- struct context_entry **tbl,
- int bus, bool ext)
- {
- int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
- struct context_entry *new_ce = NULL, ce;
- struct context_entry *old_ce = NULL;
- struct root_entry re;
- phys_addr_t old_ce_phys;
- tbl_idx = ext ? bus * 2 : bus;
- memcpy(&re, old_re, sizeof(re));
- for (devfn = 0; devfn < 256; devfn++) {
- /* First calculate the correct index */
- idx = (ext ? devfn * 2 : devfn) % 256;
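- /* Extended context entries are twice the size of legacy ones, so a 4KiB table holds only 128 devfns; devfn 128 wraps idx back to 0 and starts the upper table */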
- if (idx == 0) {
- /* First save what we may have and clean up */
- if (new_ce) {
- tbl[tbl_idx] = new_ce;
- __iommu_flush_cache(iommu, new_ce,
- VTD_PAGE_SIZE);
- pos = 1;
- }
- if (old_ce)
- memunmap(old_ce);
- ret = 0;
- if (devfn < 0x80)
- old_ce_phys = root_entry_lctp(&re);
- else
- old_ce_phys = root_entry_uctp(&re);
- if (!old_ce_phys) {
- if (ext && devfn == 0) {
- /* No LCTP, try UCTP */
- devfn = 0x7f;
- continue;
- } else {
- goto out;
- }
- }
- ret = -ENOMEM;
- old_ce = memremap(old_ce_phys, PAGE_SIZE,
- MEMREMAP_WB);
- if (!old_ce)
- goto out;
- new_ce = alloc_pgtable_page(iommu->node);
- if (!new_ce)
- goto out_unmap;
- ret = 0;
- }
- /* Now copy the context entry */
- memcpy(&ce, old_ce + idx, sizeof(ce));
- if (!__context_present(&ce))
- continue;
- did = context_domain_id(&ce);
- if (did >= 0 && did < cap_ndoms(iommu->cap))
- set_bit(did, iommu->domain_ids);
- /*
- * We need a marker for copied context entries. This
- * marker needs to work for the old format as well as
- * for extended context entries.
- *
- * Bit 67 of the context entry is used. In the old
- * format this bit is available to software, in the
- * extended format it is the PGE bit, but PGE is ignored
- * by HW if PASIDs are disabled (and thus still
- * available).
- *
- * So disable PASIDs first and then mark the entry
- * copied. This means that we don't copy PASID
- * translations from the old kernel, but this is fine as
- * faults there are not fatal.
- */
- context_clear_pasid_enable(&ce);
- context_set_copied(&ce);
- new_ce[idx] = ce;
- }
- tbl[tbl_idx + pos] = new_ce;
- __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
- out_unmap:
- memunmap(old_ce);
- out:
- return ret;
- }
- static int copy_translation_tables(struct intel_iommu *iommu)
- {
- struct context_entry **ctxt_tbls;
- struct root_entry *old_rt;
- phys_addr_t old_rt_phys;
- int ctxt_table_entries;
- unsigned long flags;
- u64 rtaddr_reg;
- int bus, ret;
- bool new_ext, ext;
- rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
- ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
- new_ext = !!ecap_ecs(iommu->ecap);
- /*
- * The RTT bit can only be changed when translation is disabled,
- * but disabling translation would open a window for data
- * corruption. So bail out and don't copy anything if we would
- * have to change the bit.
- */
- if (new_ext != ext)
- return -EINVAL;
- old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
- if (!old_rt_phys)
- return -EINVAL;
- old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
- if (!old_rt)
- return -ENOMEM;
- /* This is too big for the stack - allocate it from slab */
- ctxt_table_entries = ext ? 512 : 256;
- ret = -ENOMEM;
- ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
- if (!ctxt_tbls)
- goto out_unmap;
- for (bus = 0; bus < 256; bus++) {
- ret = copy_context_table(iommu, &old_rt[bus],
- ctxt_tbls, bus, ext);
- if (ret) {
- pr_err("%s: Failed to copy context table for bus %d\n",
- iommu->name, bus);
- continue;
- }
- }
- spin_lock_irqsave(&iommu->lock, flags);
- /* Context tables are copied, now write them to the root_entry table */
- for (bus = 0; bus < 256; bus++) {
- int idx = ext ? bus * 2 : bus;
- u64 val;
- if (ctxt_tbls[idx]) {
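- /* bit 0 of the root entry is the present bit */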
- val = virt_to_phys(ctxt_tbls[idx]) | 1;
- iommu->root_entry[bus].lo = val;
- }
- if (!ext || !ctxt_tbls[idx + 1])
- continue;
- val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
- iommu->root_entry[bus].hi = val;
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
- kfree(ctxt_tbls);
- __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
- ret = 0;
- out_unmap:
- memunmap(old_rt);
- return ret;
- }
- static int __init init_dmars(void)
- {
- struct dmar_drhd_unit *drhd;
- struct dmar_rmrr_unit *rmrr;
- bool copied_tables = false;
- struct device *dev;
- struct intel_iommu *iommu;
- int i, ret, cpu;
- /*
- * for each drhd
- * allocate root
- * initialize and program root entry to not present
- * endfor
- */
- for_each_drhd_unit(drhd) {
- /*
- * lock not needed as this is only incremented in the single
- * threaded kernel __init code path; all other accesses are
- * read-only
- */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
- g_num_of_iommus++;
- continue;
- }
- pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
- }
- /* Preallocate enough resources for IOMMU hot-addition */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
- g_num_of_iommus = DMAR_UNITS_SUPPORTED;
- g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
- GFP_KERNEL);
- if (!g_iommus) {
- pr_err("Allocating global iommu array failed\n");
- ret = -ENOMEM;
- goto error;
- }
- for_each_possible_cpu(cpu) {
- struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
- cpu);
- dfd->tables = kzalloc(g_num_of_iommus *
- sizeof(struct deferred_flush_table),
- GFP_KERNEL);
- if (!dfd->tables) {
- ret = -ENOMEM;
- goto free_g_iommus;
- }
- spin_lock_init(&dfd->lock);
- setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
- }
- for_each_active_iommu(iommu, drhd) {
- g_iommus[iommu->seq_id] = iommu;
- intel_iommu_init_qi(iommu);
- ret = iommu_init_domains(iommu);
- if (ret)
- goto free_iommu;
- init_translation_status(iommu);
- if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
- iommu_disable_translation(iommu);
- clear_translation_pre_enabled(iommu);
- pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
- iommu->name);
- }
- /*
- * TBD:
- * we could share the same root & context tables
- * among all IOMMUs. Needs to be split out later.
- */
- ret = iommu_alloc_root_entry(iommu);
- if (ret)
- goto free_iommu;
- if (translation_pre_enabled(iommu)) {
- pr_info("Translation already enabled - trying to copy translation structures\n");
- ret = copy_translation_tables(iommu);
- if (ret) {
- /*
- * We found the IOMMU with translation
- * enabled - but failed to copy over the
- * old root-entry table. Try to proceed
- * by disabling translation now and
- * allocating a clean root-entry table.
- * This might cause DMAR faults, but
- * probably the dump will still succeed.
- */
- pr_err("Failed to copy translation tables from previous kernel for %s\n",
- iommu->name);
- iommu_disable_translation(iommu);
- clear_translation_pre_enabled(iommu);
- } else {
- pr_info("Copied translation tables from previous kernel for %s\n",
- iommu->name);
- copied_tables = true;
- }
- }
- if (!ecap_pass_through(iommu->ecap))
- hw_pass_through = 0;
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu))
- intel_svm_alloc_pasid_tables(iommu);
- #endif
- }
- /*
- * Now that qi is enabled on all iommus, set the root entry and flush
- * caches. This is required on some Intel X58 chipsets, otherwise the
- * flush_context function will loop forever and the boot hangs.
- */
- for_each_active_iommu(iommu, drhd) {
- iommu_flush_write_buffer(iommu);
- iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- }
- if (iommu_pass_through)
- iommu_identity_mapping |= IDENTMAP_ALL;
- #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
- #endif
- check_tylersburg_isoch();
- if (iommu_identity_mapping) {
- ret = si_domain_init(hw_pass_through);
- if (ret)
- goto free_iommu;
- }
- /*
- * If we copied translations from a previous kernel in the kdump
- * case, we cannot assign the devices to domains now, as that
- * would eliminate the old mappings. So skip this part and defer
- * the assignment to device driver initialization time.
- */
- if (copied_tables)
- goto domains_done;
- /*
- * If pass-through is not set or not enabled, set up context entries for
- * identity mappings for RMRR, GFX and ISA, possibly falling back to
- * static identity mapping if iommu_identity_mapping is set.
- */
- if (iommu_identity_mapping) {
- ret = iommu_prepare_static_identity_mapping(hw_pass_through);
- if (ret) {
- pr_crit("Failed to setup IOMMU pass-through\n");
- goto free_iommu;
- }
- }
- /*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
- */
- pr_info("Setting RMRR:\n");
- for_each_rmrr_units(rmrr) {
- /* some BIOSes list nonexistent devices in the DMAR table. */
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, dev) {
- ret = iommu_prepare_rmrr_dev(rmrr, dev);
- if (ret)
- pr_err("Mapping reserved region failed\n");
- }
- }
- iommu_prepare_isa();
- domains_done:
- /*
- * for each drhd
- * enable fault log
- * global invalidate context cache
- * global invalidate iotlb
- * enable translation
- */
- for_each_iommu(iommu, drhd) {
- if (drhd->ignored) {
- /*
- * we always have to disable PMRs or DMA may fail on
- * this device
- */
- if (force_on)
- iommu_disable_protect_mem_regions(iommu);
- continue;
- }
- iommu_flush_write_buffer(iommu);
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
- ret = intel_svm_enable_prq(iommu);
- if (ret)
- goto free_iommu;
- }
- #endif
- ret = dmar_set_interrupt(iommu);
- if (ret)
- goto free_iommu;
- if (!translation_pre_enabled(iommu))
- iommu_enable_translation(iommu);
- iommu_disable_protect_mem_regions(iommu);
- }
- return 0;
- free_iommu:
- for_each_active_iommu(iommu, drhd) {
- disable_dmar_iommu(iommu);
- free_dmar_iommu(iommu);
- }
- free_g_iommus:
- for_each_possible_cpu(cpu)
- kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
- kfree(g_iommus);
- error:
- return ret;
- }
- /* This takes a number of _MM_ pages, not VTD pages */
- static unsigned long intel_alloc_iova(struct device *dev,
- struct dmar_domain *domain,
- unsigned long nrpages, uint64_t dma_mask)
- {
- unsigned long iova_pfn = 0;
- /* Restrict dma_mask to the width that the iommu can handle */
- dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
- /* Ensure we reserve the whole size-aligned region */
- nrpages = __roundup_pow_of_two(nrpages);
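- /* e.g. a 5-page request reserves 8 pages, so the size-aligned IOVA region fully covers it */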
- if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
- /*
- * First try to allocate an I/O virtual address within
- * DMA_BIT_MASK(32); if that fails, try allocating
- * from the higher range
- */
- iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)));
- if (iova_pfn)
- return iova_pfn;
- }
- iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
- if (unlikely(!iova_pfn)) {
- pr_err("Allocating %ld-page iova for %s failed",
- nrpages, dev_name(dev));
- return 0;
- }
- return iova_pfn;
- }
- static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
- {
- struct dmar_domain *domain, *tmp;
- struct dmar_rmrr_unit *rmrr;
- struct device *i_dev;
- int i, ret;
- domain = find_domain(dev);
- if (domain)
- goto out;
- domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- goto out;
- /* We have a new domain - setup possible RMRRs for the device */
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, i_dev) {
- if (i_dev != dev)
- continue;
- ret = domain_prepare_identity_map(dev, domain,
- rmrr->base_address,
- rmrr->end_address);
- if (ret)
- dev_err(dev, "Mapping reserved region failed\n");
- }
- }
- rcu_read_unlock();
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
- out:
- if (!domain)
- pr_err("Allocating domain for %s failed\n", dev_name(dev));
- return domain;
- }
- static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
- {
- struct device_domain_info *info;
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
- if (likely(info))
- return info->domain;
- return __get_valid_domain_for_dev(dev);
- }
- /* Check if the device needs to go through the non-identity map/unmap process. */
- static int iommu_no_mapping(struct device *dev)
- {
- int found;
- if (iommu_dummy(dev))
- return 1;
- if (!iommu_identity_mapping)
- return 0;
- found = identity_mapping(dev);
- if (found) {
- if (iommu_should_identity_map(dev, 0))
- return 1;
- else {
- /*
- * The 32-bit device is removed from si_domain and falls back
- * to non-identity mapping.
- */
- dmar_remove_one_dev_info(si_domain, dev);
- pr_info("32bit %s uses non-identity mapping\n",
- dev_name(dev));
- return 0;
- }
- } else {
- /*
- * If a 64-bit DMA device was detached from a VM, the device
- * is put back into si_domain for identity mapping.
- */
- if (iommu_should_identity_map(dev, 0)) {
- int ret;
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret) {
- pr_info("64bit %s uses identity mapping\n",
- dev_name(dev));
- return 1;
- }
- }
- }
- return 0;
- }
- static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
- size_t size, int dir, u64 dma_mask)
- {
- struct dmar_domain *domain;
- phys_addr_t start_paddr;
- unsigned long iova_pfn;
- int prot = 0;
- int ret;
- struct intel_iommu *iommu;
- unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
- BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return paddr;
- domain = get_valid_domain_for_dev(dev);
- if (!domain)
- return 0;
- iommu = domain_get_iommu(domain);
- size = aligned_nrpages(paddr, size);
- iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
- if (!iova_pfn)
- goto error;
- /*
- * Check if DMAR supports zero-length reads on write-only
- * mappings.
- */
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
- !cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
- /*
- * paddr to (paddr + size) may cover partial pages; we should map the
- * whole pages. Note: if two parts of one page are mapped separately, we
- * might have two guest addresses mapping to the same host paddr, but this
- * is not a big problem
- */
- ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
- mm_to_dma_pfn(paddr_pfn), size, prot);
- if (ret)
- goto error;
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova_pfn),
- size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
- start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
- start_paddr += paddr & ~PAGE_MASK;
- return start_paddr;
- error:
- if (iova_pfn)
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
- pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
- dev_name(dev), size, (unsigned long long)paddr, dir);
- return 0;
- }
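- /* dma_map_ops->map_page callback: map one page for streaming DMA */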
- static dma_addr_t intel_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
- {
- return __intel_map_single(dev, page_to_phys(page) + offset, size,
- dir, *dev->dma_mask);
- }
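- /*
- * Drain one CPU's deferred-flush tables: invalidate the IOTLB for
- * each batched entry, then release its IOVA range and page freelist.
- * Called with flush_data->lock held.
- */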
- static void flush_unmaps(struct deferred_flush_data *flush_data)
- {
- int i, j;
- flush_data->timer_on = 0;
- /* just flush them all */
- for (i = 0; i < g_num_of_iommus; i++) {
- struct intel_iommu *iommu = g_iommus[i];
- struct deferred_flush_table *flush_table =
- &flush_data->tables[i];
- if (!iommu)
- continue;
- if (!flush_table->next)
- continue;
- /* In caching mode, global flushes turn emulation expensive */
- if (!cap_caching_mode(iommu->cap))
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH);
- for (j = 0; j < flush_table->next; j++) {
- unsigned long mask;
- struct deferred_flush_entry *entry =
- &flush_table->entries[j];
- unsigned long iova_pfn = entry->iova_pfn;
- unsigned long nrpages = entry->nrpages;
- struct dmar_domain *domain = entry->domain;
- struct page *freelist = entry->freelist;
- /* On real hardware multiple invalidations are expensive */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova_pfn),
- nrpages, !freelist, 0);
- else {
- mask = ilog2(nrpages);
- iommu_flush_dev_iotlb(domain,
- (uint64_t)iova_pfn << PAGE_SHIFT, mask);
- }
- free_iova_fast(&domain->iovad, iova_pfn, nrpages);
- if (freelist)
- dma_free_pagelist(freelist);
- }
- flush_table->next = 0;
- }
- flush_data->size = 0;
- }
- static void flush_unmaps_timeout(unsigned long cpuid)
- {
- struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
- unsigned long flags;
- spin_lock_irqsave(&flush_data->lock, flags);
- flush_unmaps(flush_data);
- spin_unlock_irqrestore(&flush_data->lock, flags);
- }
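- /*
- * Queue an unmapped range on this CPU's deferred-flush table and arm
- * the 10ms flush timer; once HIGH_WATER_MARK entries accumulate, all
- * CPUs' tables are drained synchronously.
- */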
- static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
- unsigned long nrpages, struct page *freelist)
- {
- unsigned long flags;
- int entry_id, iommu_id;
- struct intel_iommu *iommu;
- struct deferred_flush_entry *entry;
- struct deferred_flush_data *flush_data;
- unsigned int cpuid;
- cpuid = get_cpu();
- flush_data = per_cpu_ptr(&deferred_flush, cpuid);
- /* Flush all CPUs' entries to avoid deferring too much. If
- * this becomes a bottleneck, we can just flush our own
- * entries and rely on the flush timer for the rest.
- */
- if (flush_data->size == HIGH_WATER_MARK) {
- int cpu;
- for_each_online_cpu(cpu)
- flush_unmaps_timeout(cpu);
- }
- spin_lock_irqsave(&flush_data->lock, flags);
- iommu = domain_get_iommu(dom);
- iommu_id = iommu->seq_id;
- entry_id = flush_data->tables[iommu_id].next;
- ++(flush_data->tables[iommu_id].next);
- entry = &flush_data->tables[iommu_id].entries[entry_id];
- entry->domain = dom;
- entry->iova_pfn = iova_pfn;
- entry->nrpages = nrpages;
- entry->freelist = freelist;
- if (!flush_data->timer_on) {
- mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
- flush_data->timer_on = 1;
- }
- flush_data->size++;
- spin_unlock_irqrestore(&flush_data->lock, flags);
- put_cpu();
- }
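- /*
- * Tear down a streaming DMA mapping. In strict mode the IOTLB is
- * flushed and the IOVA freed immediately; otherwise the release is
- * batched via add_unmap() above.
- */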
- static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
- {
- struct dmar_domain *domain;
- unsigned long start_pfn, last_pfn;
- unsigned long nrpages;
- unsigned long iova_pfn;
- struct intel_iommu *iommu;
- struct page *freelist;
- if (iommu_no_mapping(dev))
- return;
- domain = find_domain(dev);
- BUG_ON(!domain);
- iommu = domain_get_iommu(domain);
- iova_pfn = IOVA_PFN(dev_addr);
- nrpages = aligned_nrpages(dev_addr, size);
- start_pfn = mm_to_dma_pfn(iova_pfn);
- last_pfn = start_pfn + nrpages - 1;
- pr_debug("Device %s unmapping: pfn %lx-%lx\n",
- dev_name(dev), start_pfn, last_pfn);
- freelist = domain_unmap(domain, start_pfn, last_pfn);
- if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain, start_pfn,
- nrpages, !freelist, 0);
- /* free iova */
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
- dma_free_pagelist(freelist);
- } else {
- add_unmap(domain, iova_pfn, nrpages, freelist);
- /*
- * Queue up the release of the unmap to save the roughly
- * 1/6th of the CPU time otherwise used up by the IOTLB
- * flush operation...
- */
- }
- }
- static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
- {
- intel_unmap(dev, dev_addr, size);
- }
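- /*
- * dma_map_ops->alloc callback: allocate zeroed pages (from CMA when
- * blocking is allowed) and map them bidirectionally under the
- * device's coherent DMA mask.
- */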
- static void *intel_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
- {
- struct page *page = NULL;
- int order;
- size = PAGE_ALIGN(size);
- order = get_order(size);
- if (!iommu_no_mapping(dev))
- flags &= ~(GFP_DMA | GFP_DMA32);
- else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- else
- flags |= GFP_DMA32;
- }
- if (gfpflags_allow_blocking(flags)) {
- unsigned int count = size >> PAGE_SHIFT;
- page = dma_alloc_from_contiguous(dev, count, order);
- if (page && iommu_no_mapping(dev) &&
- page_to_phys(page) + size > dev->coherent_dma_mask) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
- }
- }
- if (!page)
- page = alloc_pages(flags, order);
- if (!page)
- return NULL;
- memset(page_address(page), 0, size);
- *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
- DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
- if (*dma_handle)
- return page_address(page);
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, order);
- return NULL;
- }
- static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
- {
- int order;
- struct page *page = virt_to_page(vaddr);
- size = PAGE_ALIGN(size);
- order = get_order(size);
- intel_unmap(dev, dma_handle, size);
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, order);
- }
- static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
- {
- dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
- unsigned long nrpages = 0;
- struct scatterlist *sg;
- int i;
- for_each_sg(sglist, sg, nelems, i) {
- nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
- }
- intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
- }
- static int intel_nontranslate_map_sg(struct device *hddev,
- struct scatterlist *sglist, int nelems, int dir)
- {
- int i;
- struct scatterlist *sg;
- for_each_sg(sglist, sg, nelems, i) {
- BUG_ON(!sg_page(sg));
- sg->dma_address = sg_phys(sg);
- sg->dma_length = sg->length;
- }
- return nelems;
- }
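- /*
- * dma_map_ops->map_sg callback: allocate one IOVA range covering the
- * whole scatterlist and map each segment into it contiguously.
- */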
- static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, unsigned long attrs)
- {
- int i;
- struct dmar_domain *domain;
- size_t size = 0;
- int prot = 0;
- unsigned long iova_pfn;
- int ret;
- struct scatterlist *sg;
- unsigned long start_vpfn;
- struct intel_iommu *iommu;
- BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
- domain = get_valid_domain_for_dev(dev);
- if (!domain)
- return 0;
- iommu = domain_get_iommu(domain);
- for_each_sg(sglist, sg, nelems, i)
- size += aligned_nrpages(sg->offset, sg->length);
- iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
- *dev->dma_mask);
- if (!iova_pfn) {
- sglist->dma_length = 0;
- return 0;
- }
- /*
- * Check if DMAR supports zero-length reads on write-only
- * mappings.
- */
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
- !cap_zlr(iommu->cap))
- prot |= DMA_PTE_READ;
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- prot |= DMA_PTE_WRITE;
- start_vpfn = mm_to_dma_pfn(iova_pfn);
- ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
- if (unlikely(ret)) {
- dma_pte_free_pagetable(domain, start_vpfn,
- start_vpfn + size - 1);
- free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
- return 0;
- }
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
- return nelems;
- }
- static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
- {
- return !dma_addr;
- }
- struct dma_map_ops intel_dma_ops = {
- .alloc = intel_alloc_coherent,
- .free = intel_free_coherent,
- .map_sg = intel_map_sg,
- .unmap_sg = intel_unmap_sg,
- .map_page = intel_map_page,
- .unmap_page = intel_unmap_page,
- .mapping_error = intel_mapping_error,
- };
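- /*
- * For illustration: once dma_ops points at intel_dma_ops above, an
- * ordinary driver-side streaming mapping such as
- *
- * dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
- * DMA_TO_DEVICE);
- * if (dma_mapping_error(dev, dma))
- * return -ENOMEM;
- *
- * is routed to intel_map_page() and intel_mapping_error().
- */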
- static inline int iommu_domain_cache_init(void)
- {
- int ret = 0;
- iommu_domain_cache = kmem_cache_create("iommu_domain",
- sizeof(struct dmar_domain),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_domain_cache) {
- pr_err("Couldn't create iommu_domain cache\n");
- ret = -ENOMEM;
- }
- return ret;
- }
- static inline int iommu_devinfo_cache_init(void)
- {
- int ret = 0;
- iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
- sizeof(struct device_domain_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_devinfo_cache) {
- pr_err("Couldn't create devinfo cache\n");
- ret = -ENOMEM;
- }
- return ret;
- }
- static int __init iommu_init_mempool(void)
- {
- int ret;
- ret = iova_cache_get();
- if (ret)
- return ret;
- ret = iommu_domain_cache_init();
- if (ret)
- goto domain_error;
- ret = iommu_devinfo_cache_init();
- if (!ret)
- return ret;
- kmem_cache_destroy(iommu_domain_cache);
- domain_error:
- iova_cache_put();
- return -ENOMEM;
- }
- static void __init iommu_exit_mempool(void)
- {
- kmem_cache_destroy(iommu_devinfo_cache);
- kmem_cache_destroy(iommu_domain_cache);
- iova_cache_put();
- }
- static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
- {
- struct dmar_drhd_unit *drhd;
- u32 vtbar;
- int rc;
- /* We know that this device on this chipset has its own IOMMU.
- * If we find it under a different IOMMU, then the BIOS is lying
- * to us. Hope that the IOMMU for this device is actually
- * disabled, and it needs no translation...
- */
- rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
- if (rc) {
- /* "can't" happen */
- dev_info(&pdev->dev, "failed to run vt-d quirk\n");
- return;
- }
- vtbar &= 0xffff0000;
- /* we know that this iommu should be at offset 0xa000 from vtbar */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
- TAINT_FIRMWARE_WORKAROUND,
- "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
- pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
- }
- DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
- static void __init init_no_remapping_devices(void)
- {
- struct dmar_drhd_unit *drhd;
- struct device *dev;
- int i;
- for_each_drhd_unit(drhd) {
- if (!drhd->include_all) {
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- break;
- /* ignore DMAR unit if no devices exist */
- if (i == drhd->devices_cnt)
- drhd->ignored = 1;
- }
- }
- for_each_active_drhd_unit(drhd) {
- if (drhd->include_all)
- continue;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
- break;
- if (i < drhd->devices_cnt)
- continue;
- /* This IOMMU has *only* gfx devices. Either bypass it or
- * set the gfx_mapped flag, as appropriate */
- if (!dmar_map_gfx) {
- drhd->ignored = 1;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
- }
- }
- }
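- /*
- * Suspend/resume support: translation is disabled and the fault-event
- * registers are saved across suspend, then restored via the syscore
- * ops below.
- */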
- #ifdef CONFIG_SUSPEND
- static int init_iommu_hw(void)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
- for_each_active_iommu(iommu, drhd)
- if (iommu->qi)
- dmar_reenable_qi(iommu);
- for_each_iommu(iommu, drhd) {
- if (drhd->ignored) {
- /*
- * we always have to disable PMRs or DMA may fail on
- * this device
- */
- if (force_on)
- iommu_disable_protect_mem_regions(iommu);
- continue;
- }
-
- iommu_flush_write_buffer(iommu);
- iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
- iommu_disable_protect_mem_regions(iommu);
- }
- return 0;
- }
- static void iommu_flush_all(void)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- for_each_active_iommu(iommu, drhd) {
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH);
- }
- }
- static int iommu_suspend(void)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
- unsigned long flag;
- for_each_active_iommu(iommu, drhd) {
- iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
- GFP_ATOMIC);
- if (!iommu->iommu_state)
- goto nomem;
- }
- iommu_flush_all();
- for_each_active_iommu(iommu, drhd) {
- iommu_disable_translation(iommu);
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- iommu->iommu_state[SR_DMAR_FECTL_REG] =
- readl(iommu->reg + DMAR_FECTL_REG);
- iommu->iommu_state[SR_DMAR_FEDATA_REG] =
- readl(iommu->reg + DMAR_FEDATA_REG);
- iommu->iommu_state[SR_DMAR_FEADDR_REG] =
- readl(iommu->reg + DMAR_FEADDR_REG);
- iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
- readl(iommu->reg + DMAR_FEUADDR_REG);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- return 0;
- nomem:
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
- return -ENOMEM;
- }
- static void iommu_resume(void)
- {
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
- unsigned long flag;
- if (init_iommu_hw()) {
- if (force_on)
- panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
- else
- WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
- return;
- }
- for_each_active_iommu(iommu, drhd) {
- raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
- iommu->reg + DMAR_FECTL_REG);
- writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
- iommu->reg + DMAR_FEDATA_REG);
- writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
- iommu->reg + DMAR_FEADDR_REG);
- writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
- iommu->reg + DMAR_FEUADDR_REG);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
- }
- static struct syscore_ops iommu_syscore_ops = {
- .resume = iommu_resume,
- .suspend = iommu_suspend,
- };
- static void __init init_iommu_pm_ops(void)
- {
- register_syscore_ops(&iommu_syscore_ops);
- }
- #else
- static inline void init_iommu_pm_ops(void) {}
- #endif /* CONFIG_SUSPEND */
- int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
- {
- struct acpi_dmar_reserved_memory *rmrr;
- struct dmar_rmrr_unit *rmrru;
- rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
- if (!rmrru)
- return -ENOMEM;
- rmrru->hdr = header;
- rmrr = (struct acpi_dmar_reserved_memory *)header;
- rmrru->base_address = rmrr->base_address;
- rmrru->end_address = rmrr->end_address;
- rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- &rmrru->devices_cnt);
- if (rmrru->devices_cnt && rmrru->devices == NULL) {
- kfree(rmrru);
- return -ENOMEM;
- }
- list_add(&rmrru->list, &dmar_rmrr_units);
- return 0;
- }
- static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
- {
- struct dmar_atsr_unit *atsru;
- struct acpi_dmar_atsr *tmp;
- list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
- tmp = (struct acpi_dmar_atsr *)atsru->hdr;
- if (atsr->segment != tmp->segment)
- continue;
- if (atsr->header.length != tmp->header.length)
- continue;
- if (memcmp(atsr, tmp, atsr->header.length) == 0)
- return atsru;
- }
- return NULL;
- }
- int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
- {
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
- if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
- return 0;
- atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = dmar_find_atsr(atsr);
- if (atsru)
- return 0;
- atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
- if (!atsru)
- return -ENOMEM;
- /*
- * If memory is allocated from slab by ACPI _DSM method, we need to
- * copy the memory content because the memory buffer will be freed
- * on return.
- */
- atsru->hdr = (void *)(atsru + 1);
- memcpy(atsru->hdr, hdr, hdr->length);
- atsru->include_all = atsr->flags & 0x1;
- if (!atsru->include_all) {
- atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- &atsru->devices_cnt);
- if (atsru->devices_cnt && atsru->devices == NULL) {
- kfree(atsru);
- return -ENOMEM;
- }
- }
- list_add_rcu(&atsru->list, &dmar_atsr_units);
- return 0;
- }
- static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
- {
- dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
- kfree(atsru);
- }
- int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
- {
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
- atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = dmar_find_atsr(atsr);
- if (atsru) {
- list_del_rcu(&atsru->list);
- synchronize_rcu();
- intel_iommu_free_atsr(atsru);
- }
- return 0;
- }
- int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
- {
- int i;
- struct device *dev;
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
- atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = dmar_find_atsr(atsr);
- if (!atsru)
- return 0;
- if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
- for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
- i, dev)
- return -EBUSY;
- }
- return 0;
- }
- static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
- {
- int sp, ret = 0;
- struct intel_iommu *iommu = dmaru->iommu;
- if (g_iommus[iommu->seq_id])
- return 0;
- if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
- pr_warn("%s: Doesn't support hardware pass through.\n",
- iommu->name);
- return -ENXIO;
- }
- if (!ecap_sc_support(iommu->ecap) &&
- domain_update_iommu_snooping(iommu)) {
- pr_warn("%s: Doesn't support snooping.\n",
- iommu->name);
- return -ENXIO;
- }
- sp = domain_update_iommu_superpage(iommu) - 1;
- if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
- pr_warn("%s: Doesn't support large page.\n",
- iommu->name);
- return -ENXIO;
- }
- /*
- * Disable translation if already enabled prior to OS handover.
- */
- if (iommu->gcmd & DMA_GCMD_TE)
- iommu_disable_translation(iommu);
- g_iommus[iommu->seq_id] = iommu;
- ret = iommu_init_domains(iommu);
- if (ret == 0)
- ret = iommu_alloc_root_entry(iommu);
- if (ret)
- goto out;
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu))
- intel_svm_alloc_pasid_tables(iommu);
- #endif
- if (dmaru->ignored) {
- /*
- * we always have to disable PMRs or DMA may fail on this device
- */
- if (force_on)
- iommu_disable_protect_mem_regions(iommu);
- return 0;
- }
- intel_iommu_init_qi(iommu);
- iommu_flush_write_buffer(iommu);
- #ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
- ret = intel_svm_enable_prq(iommu);
- if (ret)
- goto disable_iommu;
- }
- #endif
- ret = dmar_set_interrupt(iommu);
- if (ret)
- goto disable_iommu;
- iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
- iommu_disable_protect_mem_regions(iommu);
- return 0;
- disable_iommu:
- disable_dmar_iommu(iommu);
- out:
- free_dmar_iommu(iommu);
- return ret;
- }
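- /*
- * Hot-plug entry point: bring a newly inserted DMAR unit online via
- * intel_iommu_add(), or tear it down on removal.
- */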
- int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
- {
- int ret = 0;
- struct intel_iommu *iommu = dmaru->iommu;
- if (!intel_iommu_enabled)
- return 0;
- if (iommu == NULL)
- return -EINVAL;
- if (insert) {
- ret = intel_iommu_add(dmaru);
- } else {
- disable_dmar_iommu(iommu);
- free_dmar_iommu(iommu);
- }
- return ret;
- }
- static void intel_iommu_free_dmars(void)
- {
- struct dmar_rmrr_unit *rmrru, *rmrr_n;
- struct dmar_atsr_unit *atsru, *atsr_n;
- list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
- list_del(&rmrru->list);
- dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
- kfree(rmrru);
- }
- list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
- list_del(&atsru->list);
- intel_iommu_free_atsr(atsru);
- }
- }
- int dmar_find_matched_atsr_unit(struct pci_dev *dev)
- {
- int i, ret = 1;
- struct pci_bus *bus;
- struct pci_dev *bridge = NULL;
- struct device *tmp;
- struct acpi_dmar_atsr *atsr;
- struct dmar_atsr_unit *atsru;
- dev = pci_physfn(dev);
- for (bus = dev->bus; bus; bus = bus->parent) {
- bridge = bus->self;
- /* If it's an integrated device, allow ATS */
- if (!bridge)
- return 1;
- /* Connected via non-PCIe: no ATS */
- if (!pci_is_pcie(bridge) ||
- pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
- /* If we found the root port, look it up in the ATSR */
- if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
- break;
- }
- rcu_read_lock();
- list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- if (atsr->segment != pci_domain_nr(dev->bus))
- continue;
- for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
- if (tmp == &bridge->dev)
- goto out;
- if (atsru->include_all)
- goto out;
- }
- ret = 0;
- out:
- rcu_read_unlock();
- return ret;
- }
- int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
- {
- int ret = 0;
- struct dmar_rmrr_unit *rmrru;
- struct dmar_atsr_unit *atsru;
- struct acpi_dmar_atsr *atsr;
- struct acpi_dmar_reserved_memory *rmrr;
- if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
- return 0;
- list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
- rmrr = container_of(rmrru->hdr,
- struct acpi_dmar_reserved_memory, header);
- if (info->event == BUS_NOTIFY_ADD_DEVICE) {
- ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- rmrr->segment, rmrru->devices,
- rmrru->devices_cnt);
- if(ret < 0)
- return ret;
- } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
- dmar_remove_dev_scope(info, rmrr->segment,
- rmrru->devices, rmrru->devices_cnt);
- }
- }
- list_for_each_entry(atsru, &dmar_atsr_units, list) {
- if (atsru->include_all)
- continue;
- atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- if (info->event == BUS_NOTIFY_ADD_DEVICE) {
- ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- atsr->segment, atsru->devices,
- atsru->devices_cnt);
- if (ret > 0)
- break;
- else if(ret < 0)
- return ret;
- } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
- if (dmar_remove_dev_scope(info, atsr->segment,
- atsru->devices, atsru->devices_cnt))
- break;
- }
- }
- return 0;
- }
- /*
- * Here we only respond to the action of a device being unbound from
- * its driver.
- *
- * A newly added device is not attached to its DMAR domain here yet;
- * that happens when the device is first mapped to an IOVA.
- */
- static int device_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
- {
- struct device *dev = data;
- struct dmar_domain *domain;
- if (iommu_dummy(dev))
- return 0;
- if (action != BUS_NOTIFY_REMOVED_DEVICE)
- return 0;
- domain = find_domain(dev);
- if (!domain)
- return 0;
- dmar_remove_one_dev_info(domain, dev);
- if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
- domain_exit(domain);
- return 0;
- }
- static struct notifier_block device_nb = {
- .notifier_call = device_notifier,
- };
- static int intel_iommu_memory_notifier(struct notifier_block *nb,
- unsigned long val, void *v)
- {
- struct memory_notify *mhp = v;
- unsigned long long start, end;
- unsigned long start_vpfn, last_vpfn;
- switch (val) {
- case MEM_GOING_ONLINE:
- start = mhp->start_pfn << PAGE_SHIFT;
- end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
- if (iommu_domain_identity_map(si_domain, start, end)) {
- pr_warn("Failed to build identity map for [%llx-%llx]\n",
- start, end);
- return NOTIFY_BAD;
- }
- break;
- case MEM_OFFLINE:
- case MEM_CANCEL_ONLINE:
- start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
- last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
- while (start_vpfn <= last_vpfn) {
- struct iova *iova;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- struct page *freelist;
- iova = find_iova(&si_domain->iovad, start_vpfn);
- if (iova == NULL) {
- pr_debug("Failed get IOVA for PFN %lx\n",
- start_vpfn);
- break;
- }
- iova = split_and_remove_iova(&si_domain->iovad, iova,
- start_vpfn, last_vpfn);
- if (iova == NULL) {
- pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
- start_vpfn, last_vpfn);
- return NOTIFY_BAD;
- }
- freelist = domain_unmap(si_domain, iova->pfn_lo,
- iova->pfn_hi);
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd)
- iommu_flush_iotlb_psi(iommu, si_domain,
- iova->pfn_lo, iova_size(iova),
- !freelist, 0);
- rcu_read_unlock();
- dma_free_pagelist(freelist);
- start_vpfn = iova->pfn_hi + 1;
- free_iova_mem(iova);
- }
- break;
- }
- return NOTIFY_OK;
- }
- static struct notifier_block intel_iommu_memory_nb = {
- .notifier_call = intel_iommu_memory_notifier,
- .priority = 0
- };
- static void free_all_cpu_cached_iovas(unsigned int cpu)
- {
- int i;
- for (i = 0; i < g_num_of_iommus; i++) {
- struct intel_iommu *iommu = g_iommus[i];
- struct dmar_domain *domain;
- int did;
- if (!iommu)
- continue;
- for (did = 0; did < cap_ndoms(iommu->cap); did++) {
- domain = get_iommu_domain(iommu, (u16)did);
- if (!domain)
- continue;
- free_cpu_cached_iovas(cpu, &domain->iovad);
- }
- }
- }
- static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
- unsigned long action, void *v)
- {
- unsigned int cpu = (unsigned long)v;
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_all_cpu_cached_iovas(cpu);
- flush_unmaps_timeout(cpu);
- break;
- }
- return NOTIFY_OK;
- }
- static struct notifier_block intel_iommu_cpu_nb = {
- .notifier_call = intel_iommu_cpu_notifier,
- };
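- /*
- * sysfs attributes (grouped under "intel-iommu" by intel_iommu_groups
- * below) exposing each unit's version, register base, capability and
- * extended-capability registers, and domain usage.
- */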
- static ssize_t intel_iommu_show_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- u32 ver = readl(iommu->reg + DMAR_VER_REG);
- return sprintf(buf, "%d:%d\n",
- DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
- }
- static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
- static ssize_t intel_iommu_show_address(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%llx\n", iommu->reg_phys);
- }
- static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
- static ssize_t intel_iommu_show_cap(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%llx\n", iommu->cap);
- }
- static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
- static ssize_t intel_iommu_show_ecap(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%llx\n", iommu->ecap);
- }
- static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
- static ssize_t intel_iommu_show_ndoms(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
- }
- static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
- static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct intel_iommu *iommu = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
- cap_ndoms(iommu->cap)));
- }
- static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
- static struct attribute *intel_iommu_attrs[] = {
- &dev_attr_version.attr,
- &dev_attr_address.attr,
- &dev_attr_cap.attr,
- &dev_attr_ecap.attr,
- &dev_attr_domains_supported.attr,
- &dev_attr_domains_used.attr,
- NULL,
- };
- static struct attribute_group intel_iommu_group = {
- .name = "intel-iommu",
- .attrs = intel_iommu_attrs,
- };
- const struct attribute_group *intel_iommu_groups[] = {
- &intel_iommu_group,
- NULL,
- };
- int __init intel_iommu_init(void)
- {
- int ret = -ENODEV;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- /* VT-d is required for a TXT/tboot launch, so enforce that */
- force_on = tboot_force_iommu();
- if (iommu_init_mempool()) {
- if (force_on)
- panic("tboot: Failed to initialize iommu memory\n");
- return -ENOMEM;
- }
- down_write(&dmar_global_lock);
- if (dmar_table_init()) {
- if (force_on)
- panic("tboot: Failed to initialize DMAR table\n");
- goto out_free_dmar;
- }
- if (dmar_dev_scope_init() < 0) {
- if (force_on)
- panic("tboot: Failed to initialize DMAR device scope\n");
- goto out_free_dmar;
- }
- if (no_iommu || dmar_disabled)
- goto out_free_dmar;
- if (list_empty(&dmar_rmrr_units))
- pr_info("No RMRR found\n");
- if (list_empty(&dmar_atsr_units))
- pr_info("No ATSR found\n");
- if (dmar_init_reserved_ranges()) {
- if (force_on)
- panic("tboot: Failed to reserve iommu ranges\n");
- goto out_free_reserved_range;
- }
- if (dmar_map_gfx)
- intel_iommu_gfx_mapped = 1;
- init_no_remapping_devices();
- ret = init_dmars();
- if (ret) {
- if (force_on)
- panic("tboot: Failed to initialize DMARs\n");
- pr_err("Initialization failed\n");
- goto out_free_reserved_range;
- }
- up_write(&dmar_global_lock);
- pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
- #ifdef CONFIG_SWIOTLB
- swiotlb = 0;
- #endif
- dma_ops = &intel_dma_ops;
- init_iommu_pm_ops();
- for_each_active_iommu(iommu, drhd)
- iommu->iommu_dev = iommu_device_create(NULL, iommu,
- intel_iommu_groups,
- "%s", iommu->name);
- bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
- bus_register_notifier(&pci_bus_type, &device_nb);
- if (si_domain && !hw_pass_through)
- register_memory_notifier(&intel_iommu_memory_nb);
- register_hotcpu_notifier(&intel_iommu_cpu_nb);
- intel_iommu_enabled = 1;
- return 0;
- out_free_reserved_range:
- put_iova_domain(&reserved_iova_list);
- out_free_dmar:
- intel_iommu_free_dmars();
- up_write(&dmar_global_lock);
- iommu_exit_mempool();
- return ret;
- }
- static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
- {
- struct intel_iommu *iommu = opaque;
- domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
- return 0;
- }
- /*
- * NB - intel-iommu lacks any sort of reference counting for the users of
- * dependent devices. If multiple endpoints have intersecting dependent
- * devices, unbinding the driver from any one of them will possibly leave
- * the others unable to operate.
- */
- static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
- {
- if (!iommu || !dev || !dev_is_pci(dev))
- return;
- pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
- }
- static void __dmar_remove_one_dev_info(struct device_domain_info *info)
- {
- struct intel_iommu *iommu;
- unsigned long flags;
- assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return;
- iommu = info->iommu;
- if (info->dev) {
- iommu_disable_dev_iotlb(info);
- domain_context_clear(iommu, info->dev);
- }
- unlink_domain_info(info);
- spin_lock_irqsave(&iommu->lock, flags);
- domain_detach_iommu(info->domain, iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
- free_devinfo_mem(info);
- }
- static void dmar_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev)
- {
- struct device_domain_info *info;
- unsigned long flags;
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- }
- static int md_domain_init(struct dmar_domain *domain, int guest_width)
- {
- int adjust_width;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
- domain_reserve_special_ranges(domain);
- /* calculate AGAW */
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- domain->agaw = width_to_agaw(adjust_width);
- domain->iommu_coherency = 0;
- domain->iommu_snooping = 0;
- domain->iommu_superpage = 0;
- domain->max_addr = 0;
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return 0;
- }
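- /*
- * iommu_ops->domain_alloc callback: only unmanaged (e.g. VFIO/KVM)
- * domains are supported; they are backed by a virtual-machine
- * dmar_domain with the default address width.
- */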
- static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
- {
- struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
- dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
- if (!dmar_domain) {
- pr_err("Can't allocate dmar_domain\n");
- return NULL;
- }
- if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- pr_err("Domain initialization failed\n");
- domain_exit(dmar_domain);
- return NULL;
- }
- domain_update_iommu_cap(dmar_domain);
- domain = &dmar_domain->domain;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
- domain->geometry.force_aperture = true;
- return domain;
- }
- static void intel_iommu_domain_free(struct iommu_domain *domain)
- {
- domain_exit(to_dmar_domain(domain));
- }
- static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
- {
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
- int addr_width;
- u8 bus, devfn;
- if (device_is_rmrr_locked(dev)) {
- dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
- return -EPERM;
- }
- /* normally dev is not mapped */
- if (unlikely(domain_context_mapped(dev))) {
- struct dmar_domain *old_domain;
- old_domain = find_domain(dev);
- if (old_domain) {
- rcu_read_lock();
- dmar_remove_one_dev_info(old_domain, dev);
- rcu_read_unlock();
- if (!domain_type_is_vm_or_si(old_domain) &&
- list_empty(&old_domain->devices))
- domain_exit(old_domain);
- }
- }
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- /* check if this iommu agaw is sufficient for max mapped address */
- addr_width = agaw_to_width(iommu->agaw);
- if (addr_width > cap_mgaw(iommu->cap))
- addr_width = cap_mgaw(iommu->cap);
- if (dmar_domain->max_addr > (1LL << addr_width)) {
- pr_err("%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, addr_width, dmar_domain->max_addr);
- return -EFAULT;
- }
- dmar_domain->gaw = addr_width;
- /*
- * Knock out extra levels of page tables if necessary
- */
- while (iommu->agaw < dmar_domain->agaw) {
- struct dma_pte *pte;
- pte = dmar_domain->pgd;
- if (dma_pte_present(pte)) {
- dmar_domain->pgd = (struct dma_pte *)
- phys_to_virt(dma_pte_addr(pte));
- free_pgtable_page(pte);
- }
- dmar_domain->agaw--;
- }
- return domain_add_dev_info(dmar_domain, dev);
- }
- static void intel_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
- {
- dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
- }
- static int intel_iommu_map(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
- {
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- u64 max_addr;
- int prot = 0;
- int ret;
- if (iommu_prot & IOMMU_READ)
- prot |= DMA_PTE_READ;
- if (iommu_prot & IOMMU_WRITE)
- prot |= DMA_PTE_WRITE;
- if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
- prot |= DMA_PTE_SNP;
- max_addr = iova + size;
- if (dmar_domain->max_addr < max_addr) {
- u64 end;
- /* check if minimum agaw is sufficient for mapped address */
- end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
- if (end < max_addr) {
- pr_err("%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, dmar_domain->gaw, max_addr);
- return -EFAULT;
- }
- dmar_domain->max_addr = max_addr;
- }
- /* Round up size to next multiple of PAGE_SIZE, if it and
- * the low bits of hpa would take us onto the next page */
- size = aligned_nrpages(hpa, size);
- ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
- return ret;
- }
- static size_t intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
- {
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct page *freelist = NULL;
- struct intel_iommu *iommu;
- unsigned long start_pfn, last_pfn;
- unsigned int npages;
- int iommu_id, level = 0;
- /* Cope with horrid API which requires us to unmap more than the
- * size argument if it happens to be a large-page mapping. */
- BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
- if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
- size = VTD_PAGE_SIZE << level_to_offset_bits(level);
- start_pfn = iova >> VTD_PAGE_SHIFT;
- last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
- freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
- npages = last_pfn - start_pfn + 1;
- for_each_domain_iommu(iommu_id, dmar_domain) {
- iommu = g_iommus[iommu_id];
- iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, npages, !freelist, 0);
- }
- dma_free_pagelist(freelist);
- if (dmar_domain->max_addr == iova + size)
- dmar_domain->max_addr = iova;
- return size;
- }
- static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
- {
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct dma_pte *pte;
- int level = 0;
- u64 phys = 0;
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
- if (pte)
- phys = dma_pte_addr(pte);
- return phys;
- }
- static bool intel_iommu_capable(enum iommu_cap cap)
- {
- if (cap == IOMMU_CAP_CACHE_COHERENCY)
- return domain_update_iommu_snooping(NULL) == 1;
- if (cap == IOMMU_CAP_INTR_REMAP)
- return irq_remapping_enabled == 1;
- return false;
- }
- static int intel_iommu_add_device(struct device *dev)
- {
- struct intel_iommu *iommu;
- struct iommu_group *group;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
- iommu_device_link(iommu->iommu_dev, dev);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
- iommu_group_put(group);
- return 0;
- }
- static void intel_iommu_remove_device(struct device *dev)
- {
- struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return;
- iommu_group_remove_device(dev);
- iommu_device_unlink(iommu->iommu_dev, dev);
- }
- #ifdef CONFIG_INTEL_IOMMU_SVM
- #define MAX_NR_PASID_BITS (20)
- static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
- {
- /*
- * Convert ecap_pss to the extended context entry pts encoding;
- * also respect the soft pasid_max value set by the iommu.
- * - number of PASID bits = ecap_pss + 1
- * - number of PASID table entries = 2^(pts + 5)
- * Therefore, pts = ecap_pss - 4
- * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
- */
- if (ecap_pss(iommu->ecap) < 5)
- return 0;
- /* pasid_max is encoded as actual number of entries not the bits */
- return find_first_bit((unsigned long *)&iommu->pasid_max,
- MAX_NR_PASID_BITS) - 5;
- }
- int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
- {
- struct device_domain_info *info;
- struct context_entry *context;
- struct dmar_domain *domain;
- unsigned long flags;
- u64 ctx_lo;
- int ret;
- domain = get_valid_domain_for_dev(sdev->dev);
- if (!domain)
- return -EINVAL;
- spin_lock_irqsave(&device_domain_lock, flags);
- spin_lock(&iommu->lock);
- ret = -EINVAL;
- info = sdev->dev->archdata.iommu;
- if (!info || !info->pasid_supported)
- goto out;
- context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
- if (WARN_ON(!context))
- goto out;
- ctx_lo = context[0].lo;
- sdev->did = domain->iommu_did[iommu->seq_id];
- sdev->sid = PCI_DEVID(info->bus, info->devfn);
- if (!(ctx_lo & CONTEXT_PASIDE)) {
- context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
- context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
- intel_iommu_get_pts(iommu);
- wmb();
- /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
- * extended to permit requests-with-PASID if the PASIDE bit
- * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
- * however, the PASIDE bit is ignored and requests-with-PASID
- * are unconditionally blocked, which makes less sense.
- * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
- * "guest mode" translation types depending on whether ATS
- * is available or not. Annoyingly, we can't use the new
- * modes *unless* PASIDE is set. */
- if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
- ctx_lo &= ~CONTEXT_TT_MASK;
- if (info->ats_supported)
- ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
- else
- ctx_lo |= CONTEXT_TT_PT_PASID << 2;
- }
- ctx_lo |= CONTEXT_PASIDE;
- if (iommu->pasid_state_table)
- ctx_lo |= CONTEXT_DINVE;
- if (info->pri_supported)
- ctx_lo |= CONTEXT_PRS;
- context[0].lo = ctx_lo;
- wmb();
- iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- }
- /* Enable PASID support in the device, if it wasn't already */
- if (!info->pasid_enabled)
- iommu_enable_dev_iotlb(info);
- if (info->ats_enabled) {
- sdev->dev_iotlb = 1;
- sdev->qdep = info->ats_qdep;
- if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
- sdev->qdep = 0;
- }
- ret = 0;
- out:
- spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return ret;
- }
- struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
- {
- struct intel_iommu *iommu;
- u8 bus, devfn;
- if (iommu_dummy(dev)) {
- dev_warn(dev,
- "No IOMMU translation for device; cannot enable SVM\n");
- return NULL;
- }
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu) {
- dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
- return NULL;
- }
- if (!iommu->pasid_table) {
- dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
- return NULL;
- }
- return iommu;
- }
- #endif /* CONFIG_INTEL_IOMMU_SVM */
- static const struct iommu_ops intel_iommu_ops = {
- .capable = intel_iommu_capable,
- .domain_alloc = intel_iommu_domain_alloc,
- .domain_free = intel_iommu_domain_free,
- .attach_dev = intel_iommu_attach_device,
- .detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map,
- .unmap = intel_iommu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
- .device_group = pci_device_group,
- .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
- };
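- /*
- * For illustration: a VFIO-style consumer reaches the callbacks above
- * through the generic IOMMU API, e.g.
- *
- * struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
- * int rc = iommu_attach_device(dom, dev);
- * if (!rc)
- * rc = iommu_map(dom, iova, phys, SZ_4K,
- * IOMMU_READ | IOMMU_WRITE);
- *
- * which lands in intel_iommu_domain_alloc(),
- * intel_iommu_attach_device() and intel_iommu_map() respectively.
- */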
- static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
- {
- /* G4x/GM45 integrated gfx dmar support is totally busted. */
- pr_info("Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
- }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
- static void quirk_iommu_rwbf(struct pci_dev *dev)
- {
- /*
- * Mobile 4 Series Chipset neglects to set RWBF capability,
- * but needs it. Same seems to hold for the desktop versions.
- */
- pr_info("Forcing write-buffer flush capability\n");
- rwbf_quirk = 1;
- }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
- #define GGC 0x52
- #define GGC_MEMORY_SIZE_MASK (0xf << 8)
- #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
- #define GGC_MEMORY_SIZE_1M (0x1 << 8)
- #define GGC_MEMORY_SIZE_2M (0x3 << 8)
- #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
- #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
- #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
- #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
- static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
- {
- unsigned short ggc;
- if (pci_read_config_word(dev, GGC, &ggc))
- return;
- if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
- pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
- dmar_map_gfx = 0;
- } else if (dmar_map_gfx) {
- /* we have to ensure the gfx device is idle before we flush */
- pr_info("Disabling batched IOTLB flush on Ironlake\n");
- intel_iommu_strict = 1;
- }
- }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
- /* On Tylersburg chipsets, some BIOSes have been known to enable the
- * ISOCH DMAR unit for the Azalia sound device, but not give it any
- * TLB entries, which causes it to deadlock. Check for that. We do
- * this in a function called from init_dmars(), instead of in a PCI
- * quirk, because we don't want to print the obnoxious "BIOS broken"
- * message if VT-d is actually disabled.
- */
- static void __init check_tylersburg_isoch(void)
- {
- struct pci_dev *pdev;
- uint32_t vtisochctrl;
- /* If there's no Azalia in the system anyway, forget it. */
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
- if (!pdev)
- return;
- pci_dev_put(pdev);
- /* System Management Registers. Might be hidden, in which case
- * we can't do the sanity check. But that's OK, because the
- * known-broken BIOSes _don't_ actually hide it, so far. */
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
- if (!pdev)
- return;
- if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
- pci_dev_put(pdev);
- return;
- }
- pci_dev_put(pdev);
- /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
- if (vtisochctrl & 1)
- return;
- /* Drop all bits other than the number of TLB entries */
- vtisochctrl &= 0x1c;
- /* If we have the recommended number of TLB entries (16), fine. */
- if (vtisochctrl == 0x10)
- return;
- /* Zero TLB entries? You get to ride the short bus to school. */
- if (!vtisochctrl) {
- WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- iommu_identity_mapping |= IDENTMAP_AZALIA;
- return;
- }
- pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
- vtisochctrl);
- }
|