smartpqi_init.c

/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to [email protected]
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION "0.9.13-370"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 9
#define DRIVER_RELEASE 13
#define DRIVER_REVISION 370

#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
#define DRIVER_NAME_SHORT "smartpqi"

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
        DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0

static char *hpe_branded_controller = "HPE Smart Array Controller";
static char *microsemi_branded_controller = "Microsemi Smart Family Controller";

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
        struct pqi_queue_group *queue_group, enum pqi_io_path path,
        struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
        struct pqi_iu_header *request, unsigned int flags,
        struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
        unsigned int cdb_length, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
        pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_device_id_wildcards,
        "Disable device ID wildcards.");

static char *raid_levels[] = {
        "RAID-0",
        "RAID-4",
        "RAID-1(1+0)",
        "RAID-5",
        "RAID-5+1",
        "RAID-ADG",
        "RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
        if (raid_level < ARRAY_SIZE(raid_levels))
                return raid_levels[raid_level];

        return "";
}

#define SA_RAID_0       0
#define SA_RAID_4       1
#define SA_RAID_1       2       /* also used for RAID 10 */
#define SA_RAID_5       3       /* also used for RAID 50 */
#define SA_RAID_51      4
#define SA_RAID_6       5       /* also used for RAID 60 */
#define SA_RAID_ADM     6       /* also used for RAID 1+0 ADM */
#define SA_RAID_MAX     SA_RAID_ADM
#define SA_RAID_UNKNOWN 0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
        scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
        return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
        void *hostdata = shost_priv(shost);

        return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
        return !device->is_physical_device;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
        return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
        if (ctrl_info->controller_online)
                if (!sis_is_firmware_running(ctrl_info))
                        pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
        return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
        struct pqi_ctrl_info *ctrl_info)
{
        return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
        enum pqi_ctrl_mode mode)
{
        sis_write_driver_scratch(ctrl_info, mode);
}

#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
        schedule_delayed_work(&ctrl_info->rescan_work,
                PQI_RESCAN_WORK_INTERVAL);
}

static int pqi_map_single(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *sg_descriptor, void *buffer,
        size_t buffer_length, int data_direction)
{
        dma_addr_t bus_address;

        if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
                return 0;

        bus_address = pci_map_single(pci_dev, buffer, buffer_length,
                data_direction);
        if (pci_dma_mapping_error(pci_dev, bus_address))
                return -ENOMEM;

        put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
        put_unaligned_le32(buffer_length, &sg_descriptor->length);
        put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

        return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *descriptors, int num_descriptors,
        int data_direction)
{
        int i;

        if (data_direction == PCI_DMA_NONE)
                return;

        for (i = 0; i < num_descriptors; i++)
                pci_unmap_single(pci_dev,
                        (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
                        get_unaligned_le32(&descriptors[i].length),
                        data_direction);
}
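
/*
 * Descriptive note (not in the original source): the helper below fills in
 * a RAID-path request IU for one of the supported CISS/BMIC commands,
 * derives the PCI DMA direction from the SOP data-direction flag it just
 * set, and maps the caller's buffer into the first (and only) SG
 * descriptor via pqi_map_single().  Callers unmap with pqi_pci_unmap()
 * once the request completes.
 */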
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
        struct pqi_raid_path_request *request, u8 cmd,
        u8 *scsi3addr, void *buffer, size_t buffer_length,
        u16 vpd_page, int *pci_direction)
{
        u8 *cdb;
        int pci_dir;

        memset(request, 0, sizeof(*request));

        request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
        put_unaligned_le16(offsetof(struct pqi_raid_path_request,
                sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
                &request->header.iu_length);
        put_unaligned_le32(buffer_length, &request->buffer_length);
        memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
        request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

        cdb = request->cdb;

        switch (cmd) {
        case INQUIRY:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = INQUIRY;
                if (vpd_page & VPD_PAGE) {
                        cdb[1] = 0x1;
                        cdb[2] = (u8)vpd_page;
                }
                cdb[4] = (u8)buffer_length;
                break;
        case CISS_REPORT_LOG:
        case CISS_REPORT_PHYS:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = cmd;
                if (cmd == CISS_REPORT_PHYS)
                        cdb[1] = CISS_REPORT_PHYS_EXTENDED;
                else
                        cdb[1] = CISS_REPORT_LOG_EXTENDED;
                put_unaligned_be32(buffer_length, &cdb[6]);
                break;
        case CISS_GET_RAID_MAP:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = CISS_READ;
                cdb[1] = CISS_GET_RAID_MAP;
                put_unaligned_be32(buffer_length, &cdb[6]);
                break;
        case SA_CACHE_FLUSH:
                request->data_direction = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = BMIC_CACHE_FLUSH;
                put_unaligned_be16(buffer_length, &cdb[7]);
                break;
        case BMIC_IDENTIFY_CONTROLLER:
        case BMIC_IDENTIFY_PHYSICAL_DEVICE:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = BMIC_READ;
                cdb[6] = cmd;
                put_unaligned_be16(buffer_length, &cdb[7]);
                break;
        case BMIC_WRITE_HOST_WELLNESS:
                request->data_direction = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = cmd;
                put_unaligned_be16(buffer_length, &cdb[7]);
                break;
        default:
                dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
                        cmd);
                WARN_ON(cmd);
                break;
        }

        switch (request->data_direction) {
        case SOP_READ_FLAG:
                pci_dir = PCI_DMA_FROMDEVICE;
                break;
        case SOP_WRITE_FLAG:
                pci_dir = PCI_DMA_TODEVICE;
                break;
        case SOP_NO_DIRECTION_FLAG:
                pci_dir = PCI_DMA_NONE;
                break;
        default:
                pci_dir = PCI_DMA_BIDIRECTIONAL;
                break;
        }

        *pci_direction = pci_dir;

        return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
                buffer, buffer_length, pci_dir);
}
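
/*
 * Descriptive note (not in the original source): the allocator below
 * reserves a free element from the preallocated io_request pool without
 * taking a lock.  Starting from a (benignly racy) hint, it atomically
 * bumps each slot's refcount until one transitions 0 -> 1, which claims
 * that slot; losers are immediately decremented back.  The hint is then
 * advanced so the next caller starts its search one slot further on.
 */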
static struct pqi_io_request *pqi_alloc_io_request(
        struct pqi_ctrl_info *ctrl_info)
{
        struct pqi_io_request *io_request;
        u16 i = ctrl_info->next_io_request_slot;        /* benignly racy */

        while (1) {
                io_request = &ctrl_info->io_request_pool[i];
                if (atomic_inc_return(&io_request->refcount) == 1)
                        break;
                atomic_dec(&io_request->refcount);
                i = (i + 1) % ctrl_info->max_io_slots;
        }

        /* benignly racy */
        ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

        io_request->scmd = NULL;
        io_request->status = 0;
        io_request->error_info = NULL;

        return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
        atomic_dec(&io_request->refcount);
}

static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
        struct bmic_identify_controller *buffer)
{
        int rc;
        int pci_direction;
        struct pqi_raid_path_request request;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
                sizeof(*buffer), 0, &pci_direction);
        if (rc)
                return rc;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
                NULL, NO_TIMEOUT);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
                pci_direction);

        return rc;
}

static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
        u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
        int rc;
        int pci_direction;
        struct pqi_raid_path_request request;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
                &pci_direction);
        if (rc)
                return rc;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
                NULL, NO_TIMEOUT);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
                pci_direction);

        return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device,
        struct bmic_identify_physical_device *buffer,
        size_t buffer_length)
{
        int rc;
        int pci_direction;
        u16 bmic_device_index;
        struct pqi_raid_path_request request;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
                buffer_length, 0, &pci_direction);
        if (rc)
                return rc;

        bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
        request.cdb[2] = (u8)bmic_device_index;
        request.cdb[9] = (u8)(bmic_device_index >> 8);

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                0, NULL, NO_TIMEOUT);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
                pci_direction);

        return rc;
}

#define SA_CACHE_FLUSH_BUFFER_LENGTH 4

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct pqi_raid_path_request request;
        int pci_direction;
        u8 *buffer;

        /*
         * Don't bother trying to flush the cache if the controller is
         * locked up.
         */
        if (pqi_ctrl_offline(ctrl_info))
                return -ENXIO;

        buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
                SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
        if (rc)
                goto out;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                0, NULL, NO_TIMEOUT);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
                pci_direction);

out:
        kfree(buffer);

        return rc;
}

static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
        void *buffer, size_t buffer_length)
{
        int rc;
        struct pqi_raid_path_request request;
        int pci_direction;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
                buffer_length, 0, &pci_direction);
        if (rc)
                return rc;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                0, NULL, NO_TIMEOUT);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
                pci_direction);

        return rc;
}
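
/*
 * Descriptive note (not in the original source): the two packed structures
 * below are the payloads handed to BMIC_WRITE_HOST_WELLNESS via
 * pqi_write_host_wellness().  Each record is wrapped in ASCII tags
 * ("<HW>" start tag, "DV"/"TD" record tag, "DW" don't-write tag, "ZZ" end
 * tag) with a little-endian length field, carrying either the driver
 * version string or the current time.
 */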
#pragma pack(1)

struct bmic_host_wellness_driver_version {
        u8 start_tag[4];
        u8 driver_version_tag[2];
        __le16 driver_version_length;
        char driver_version[32];
        u8 dont_write_tag[2];
        u8 end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
        struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_host_wellness_driver_version *buffer;
        size_t buffer_length;

        buffer_length = sizeof(*buffer);

        buffer = kmalloc(buffer_length, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        buffer->start_tag[0] = '<';
        buffer->start_tag[1] = 'H';
        buffer->start_tag[2] = 'W';
        buffer->start_tag[3] = '>';
        buffer->driver_version_tag[0] = 'D';
        buffer->driver_version_tag[1] = 'V';
        put_unaligned_le16(sizeof(buffer->driver_version),
                &buffer->driver_version_length);
        strncpy(buffer->driver_version, DRIVER_VERSION,
                sizeof(buffer->driver_version) - 1);
        buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
        buffer->dont_write_tag[0] = 'D';
        buffer->dont_write_tag[1] = 'W';
        buffer->end_tag[0] = 'Z';
        buffer->end_tag[1] = 'Z';

        rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

        kfree(buffer);

        return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
        u8 start_tag[4];
        u8 time_tag[2];
        __le16 time_length;
        u8 time[8];
        u8 dont_write_tag[2];
        u8 end_tag[2];
};

#pragma pack()
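
/*
 * Descriptive note (not in the original source): the function below sends
 * the current wall-clock time (local time derived from sys_tz) to the
 * controller.  The eight time bytes are BCD encoded as hour, minute,
 * second, a reserved byte, month, day, century, and year-of-century.
 */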
static int pqi_write_current_time_to_host_wellness(
        struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_host_wellness_time *buffer;
        size_t buffer_length;
        time64_t local_time;
        unsigned int year;
        struct tm tm;

        buffer_length = sizeof(*buffer);

        buffer = kmalloc(buffer_length, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        buffer->start_tag[0] = '<';
        buffer->start_tag[1] = 'H';
        buffer->start_tag[2] = 'W';
        buffer->start_tag[3] = '>';
        buffer->time_tag[0] = 'T';
        buffer->time_tag[1] = 'D';
        put_unaligned_le16(sizeof(buffer->time),
                &buffer->time_length);

        local_time = ktime_get_real_seconds();
        time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
        year = tm.tm_year + 1900;

        buffer->time[0] = bin2bcd(tm.tm_hour);
        buffer->time[1] = bin2bcd(tm.tm_min);
        buffer->time[2] = bin2bcd(tm.tm_sec);
        buffer->time[3] = 0;
        buffer->time[4] = bin2bcd(tm.tm_mon + 1);
        buffer->time[5] = bin2bcd(tm.tm_mday);
        buffer->time[6] = bin2bcd(year / 100);
        buffer->time[7] = bin2bcd(year % 100);

        buffer->dont_write_tag[0] = 'D';
        buffer->dont_write_tag[1] = 'W';
        buffer->end_tag[0] = 'Z';
        buffer->end_tag[1] = 'Z';

        rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

        kfree(buffer);

        return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
        int rc;
        struct pqi_ctrl_info *ctrl_info;

        ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
                update_time_work);

        rc = pqi_write_current_time_to_host_wellness(ctrl_info);
        if (rc)
                dev_warn(&ctrl_info->pci_dev->dev,
                        "error updating time on controller\n");

        schedule_delayed_work(&ctrl_info->update_time_work,
                PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
        struct pqi_ctrl_info *ctrl_info)
{
        schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
        void *buffer, size_t buffer_length)
{
        int rc;
        int pci_direction;
        struct pqi_raid_path_request request;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
        if (rc)
                return rc;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
                NULL, NO_TIMEOUT);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
                pci_direction);

        return rc;
}
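
/*
 * Descriptive note (not in the original source): the helper below issues
 * CISS_REPORT_PHYS/CISS_REPORT_LOG with a header-sized probe first to
 * learn the LUN list length, then allocates a full-sized buffer and
 * repeats the command.  If the reported length grew between the two calls
 * (for example, devices were added in the meantime), the buffer is resized
 * and the command retried.  On success, *buffer holds the complete list.
 */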
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
        void **buffer)
{
        int rc;
        size_t lun_list_length;
        size_t lun_data_length;
        size_t new_lun_list_length;
        void *lun_data = NULL;
        struct report_lun_header *report_lun_header;

        report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
        if (!report_lun_header) {
                rc = -ENOMEM;
                goto out;
        }

        rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
                sizeof(*report_lun_header));
        if (rc)
                goto out;

        lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
        lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

        lun_data = kmalloc(lun_data_length, GFP_KERNEL);
        if (!lun_data) {
                rc = -ENOMEM;
                goto out;
        }

        if (lun_list_length == 0) {
                memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
                goto out;
        }

        rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
        if (rc)
                goto out;

        new_lun_list_length = get_unaligned_be32(
                &((struct report_lun_header *)lun_data)->list_length);

        if (new_lun_list_length > lun_list_length) {
                lun_list_length = new_lun_list_length;
                kfree(lun_data);
                goto again;
        }

out:
        kfree(report_lun_header);

        if (rc) {
                kfree(lun_data);
                lun_data = NULL;
        }

        *buffer = lun_data;

        return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
        void **buffer)
{
        return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
                buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
        void **buffer)
{
        return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
        struct report_phys_lun_extended **physdev_list,
        struct report_log_lun_extended **logdev_list)
{
        int rc;
        size_t logdev_list_length;
        size_t logdev_data_length;
        struct report_log_lun_extended *internal_logdev_list;
        struct report_log_lun_extended *logdev_data;
        struct report_lun_header report_lun_header;

        rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "report physical LUNs failed\n");

        rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "report logical LUNs failed\n");

        /*
         * Tack the controller itself onto the end of the logical device list.
         */
        logdev_data = *logdev_list;

        if (logdev_data) {
                logdev_list_length =
                        get_unaligned_be32(&logdev_data->header.list_length);
        } else {
                memset(&report_lun_header, 0, sizeof(report_lun_header));
                logdev_data =
                        (struct report_log_lun_extended *)&report_lun_header;
                logdev_list_length = 0;
        }

        logdev_data_length = sizeof(struct report_lun_header) +
                logdev_list_length;

        internal_logdev_list = kmalloc(logdev_data_length +
                sizeof(struct report_log_lun_extended), GFP_KERNEL);
        if (!internal_logdev_list) {
                kfree(*logdev_list);
                *logdev_list = NULL;
                return -ENOMEM;
        }

        memcpy(internal_logdev_list, logdev_data, logdev_data_length);
        memset((u8 *)internal_logdev_list + logdev_data_length, 0,
                sizeof(struct report_log_lun_extended_entry));
        put_unaligned_be32(logdev_list_length +
                sizeof(struct report_log_lun_extended_entry),
                &internal_logdev_list->header.list_length);

        kfree(*logdev_list);
        *logdev_list = internal_logdev_list;

        return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
        int bus, int target, int lun)
{
        device->bus = bus;
        device->target = target;
        device->lun = lun;
}
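
/*
 * Descriptive note (not in the original source): the function below derives
 * bus/target/LUN for a freshly discovered device.  The controller LUN goes
 * on PQI_HBA_BUS, logical volumes go on PQI_RAID_VOLUME_BUS with the low
 * bits of the LUN ID as the LUN, and other physical devices are parked on
 * PQI_PHYSICAL_DEVICE_BUS until the SAS transport layer assigns their
 * target and LUN later.
 */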
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
        u8 *scsi3addr;
        u32 lunid;

        scsi3addr = device->scsi3addr;
        lunid = get_unaligned_le32(scsi3addr);

        if (pqi_is_hba_lunid(scsi3addr)) {
                /* The specified device is the controller. */
                pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
                device->target_lun_valid = true;
                return;
        }

        if (pqi_is_logical_device(device)) {
                pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
                        lunid & 0x3fff);
                device->target_lun_valid = true;
                return;
        }

        /*
         * Defer target and LUN assignment for non-controller physical devices
         * because the SAS transport layer will make these assignments later.
         */
        pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;
        u8 raid_level;
        u8 *buffer;

        raid_level = SA_RAID_UNKNOWN;

        buffer = kmalloc(64, GFP_KERNEL);
        if (buffer) {
                rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
                        VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
                if (rc == 0) {
                        raid_level = buffer[8];
                        if (raid_level > SA_RAID_MAX)
                                raid_level = SA_RAID_UNKNOWN;
                }
                kfree(buffer);
        }

        device->raid_level = raid_level;
}
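
/*
 * Descriptive note (not in the original source): the function below
 * sanity-checks a RAID map returned by the controller before the driver
 * accepts it: the structure size must be plausible, the implied number of
 * map entries must fit within RAID_MAP_MAX_ENTRIES, and the layout map
 * count must match the RAID level (2 for RAID-1, 3 for RAID-1 ADM, and a
 * non-zero blocks-per-row value for RAID-50/60).
 */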
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
        char *err_msg;
        u32 raid_map_size;
        u32 r5or6_blocks_per_row;
        unsigned int num_phys_disks;
        unsigned int num_raid_map_entries;

        raid_map_size = get_unaligned_le32(&raid_map->structure_size);
        if (raid_map_size < offsetof(struct raid_map, disk_data)) {
                err_msg = "RAID map too small";
                goto bad_raid_map;
        }

        if (raid_map_size > sizeof(*raid_map)) {
                err_msg = "RAID map too large";
                goto bad_raid_map;
        }

        num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
                (get_unaligned_le16(&raid_map->data_disks_per_row) +
                get_unaligned_le16(&raid_map->metadata_disks_per_row));
        num_raid_map_entries = num_phys_disks *
                get_unaligned_le16(&raid_map->row_cnt);

        if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
                err_msg = "invalid number of map entries in RAID map";
                goto bad_raid_map;
        }

        if (device->raid_level == SA_RAID_1) {
                if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
                        err_msg = "invalid RAID-1 map";
                        goto bad_raid_map;
                }
        } else if (device->raid_level == SA_RAID_ADM) {
                if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
                        err_msg = "invalid RAID-1(ADM) map";
                        goto bad_raid_map;
                }
        } else if ((device->raid_level == SA_RAID_5 ||
                device->raid_level == SA_RAID_6) &&
                get_unaligned_le16(&raid_map->layout_map_count) > 1) {
                /* RAID 50/60 */
                r5or6_blocks_per_row =
                        get_unaligned_le16(&raid_map->strip_size) *
                        get_unaligned_le16(&raid_map->data_disks_per_row);
                if (r5or6_blocks_per_row == 0) {
                        err_msg = "invalid RAID-5 or RAID-6 map";
                        goto bad_raid_map;
                }
        }

        return 0;

bad_raid_map:
        dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);

        return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;
        int pci_direction;
        struct pqi_raid_path_request request;
        struct raid_map *raid_map;

        raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
        if (!raid_map)
                return -ENOMEM;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
                sizeof(*raid_map), 0, &pci_direction);
        if (rc)
                goto error;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
                NULL, NO_TIMEOUT);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
                pci_direction);

        if (rc)
                goto error;

        rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
        if (rc)
                goto error;

        device->raid_map = raid_map;

        return 0;

error:
        kfree(raid_map);

        return rc;
}
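
/*
 * Descriptive note (not in the original source): the function below reads
 * the vendor-specific logical-volume offload status VPD page.  Byte 4
 * carries the "configured" and "enabled" bits; when offload is configured,
 * the volume's RAID map is fetched as well, and the pending offload-enable
 * flag is cleared if that fetch fails (presumably because accelerated I/O
 * cannot be used without a valid RAID map).
 */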
  756. static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
  757. struct pqi_scsi_dev *device)
  758. {
  759. int rc;
  760. u8 *buffer;
  761. u8 offload_status;
  762. buffer = kmalloc(64, GFP_KERNEL);
  763. if (!buffer)
  764. return;
  765. rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
  766. VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
  767. if (rc)
  768. goto out;
  769. #define OFFLOAD_STATUS_BYTE 4
  770. #define OFFLOAD_CONFIGURED_BIT 0x1
  771. #define OFFLOAD_ENABLED_BIT 0x2
  772. offload_status = buffer[OFFLOAD_STATUS_BYTE];
  773. device->offload_configured =
  774. !!(offload_status & OFFLOAD_CONFIGURED_BIT);
  775. if (device->offload_configured) {
  776. device->offload_enabled_pending =
  777. !!(offload_status & OFFLOAD_ENABLED_BIT);
  778. if (pqi_get_raid_map(ctrl_info, device))
  779. device->offload_enabled_pending = false;
  780. }
  781. out:
  782. kfree(buffer);
  783. }
  784. /*
  785. * Use vendor-specific VPD to determine online/offline status of a volume.
  786. */
  787. static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
  788. struct pqi_scsi_dev *device)
  789. {
  790. int rc;
  791. size_t page_length;
  792. u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
  793. bool volume_offline = true;
  794. u32 volume_flags;
  795. struct ciss_vpd_logical_volume_status *vpd;
  796. vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
  797. if (!vpd)
  798. goto no_buffer;
  799. rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
  800. VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
  801. if (rc)
  802. goto out;
  803. if (vpd->page_code != CISS_VPD_LV_STATUS)
  804. goto out;
  805. page_length = offsetof(struct ciss_vpd_logical_volume_status,
  806. volume_status) + vpd->page_length;
  807. if (page_length < sizeof(*vpd))
  808. goto out;
  809. volume_status = vpd->volume_status;
  810. volume_flags = get_unaligned_be32(&vpd->flags);
  811. volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
  812. out:
  813. kfree(vpd);
  814. no_buffer:
  815. device->volume_status = volume_status;
  816. device->volume_offline = volume_offline;
  817. }
  818. static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
  819. struct pqi_scsi_dev *device)
  820. {
  821. int rc;
  822. u8 *buffer;
  823. buffer = kmalloc(64, GFP_KERNEL);
  824. if (!buffer)
  825. return -ENOMEM;
  826. /* Send an inquiry to the device to see what it is. */
  827. rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
  828. if (rc)
  829. goto out;
  830. scsi_sanitize_inquiry_string(&buffer[8], 8);
  831. scsi_sanitize_inquiry_string(&buffer[16], 16);
  832. device->devtype = buffer[0] & 0x1f;
  833. memcpy(device->vendor, &buffer[8],
  834. sizeof(device->vendor));
  835. memcpy(device->model, &buffer[16],
  836. sizeof(device->model));
  837. if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
  838. pqi_get_raid_level(ctrl_info, device);
  839. pqi_get_offload_status(ctrl_info, device);
  840. pqi_get_volume_status(ctrl_info, device);
  841. }
  842. out:
  843. kfree(buffer);
  844. return rc;
  845. }
  846. static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
  847. struct pqi_scsi_dev *device,
  848. struct bmic_identify_physical_device *id_phys)
  849. {
  850. int rc;
  851. memset(id_phys, 0, sizeof(*id_phys));
  852. rc = pqi_identify_physical_device(ctrl_info, device,
  853. id_phys, sizeof(*id_phys));
  854. if (rc) {
  855. device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
  856. return;
  857. }
  858. device->queue_depth =
  859. get_unaligned_le16(&id_phys->current_queue_depth_limit);
  860. device->device_type = id_phys->device_type;
  861. device->active_path_index = id_phys->active_path_number;
  862. device->path_map = id_phys->redundant_path_present_map;
  863. memcpy(&device->box,
  864. &id_phys->alternate_paths_phys_box_on_port,
  865. sizeof(device->box));
  866. memcpy(&device->phys_connector,
  867. &id_phys->alternate_paths_phys_connector,
  868. sizeof(device->phys_connector));
  869. device->bay = id_phys->phys_bay_in_box;
  870. }
  871. static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
  872. struct pqi_scsi_dev *device)
  873. {
  874. char *status;
  875. static const char unknown_state_str[] =
  876. "Volume is in an unknown state (%u)";
  877. char unknown_state_buffer[sizeof(unknown_state_str) + 10];
  878. switch (device->volume_status) {
  879. case CISS_LV_OK:
  880. status = "Volume online";
  881. break;
  882. case CISS_LV_FAILED:
  883. status = "Volume failed";
  884. break;
  885. case CISS_LV_NOT_CONFIGURED:
  886. status = "Volume not configured";
  887. break;
  888. case CISS_LV_DEGRADED:
  889. status = "Volume degraded";
  890. break;
  891. case CISS_LV_READY_FOR_RECOVERY:
  892. status = "Volume ready for recovery operation";
  893. break;
  894. case CISS_LV_UNDERGOING_RECOVERY:
  895. status = "Volume undergoing recovery";
  896. break;
  897. case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
  898. status = "Wrong physical drive was replaced";
  899. break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive is not properly connected";
		break;
  903. case CISS_LV_HARDWARE_OVERHEATING:
  904. status = "Hardware is overheating";
  905. break;
  906. case CISS_LV_HARDWARE_HAS_OVERHEATED:
  907. status = "Hardware has overheated";
  908. break;
  909. case CISS_LV_UNDERGOING_EXPANSION:
  910. status = "Volume undergoing expansion";
  911. break;
  912. case CISS_LV_NOT_AVAILABLE:
  913. status = "Volume waiting for transforming volume";
  914. break;
  915. case CISS_LV_QUEUED_FOR_EXPANSION:
  916. status = "Volume queued for expansion";
  917. break;
  918. case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
  919. status = "Volume disabled due to SCSI ID conflict";
  920. break;
  921. case CISS_LV_EJECTED:
  922. status = "Volume has been ejected";
  923. break;
  924. case CISS_LV_UNDERGOING_ERASE:
  925. status = "Volume undergoing background erase";
  926. break;
  927. case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
  928. status = "Volume ready for predictive spare rebuild";
  929. break;
  930. case CISS_LV_UNDERGOING_RPI:
  931. status = "Volume undergoing rapid parity initialization";
  932. break;
  933. case CISS_LV_PENDING_RPI:
  934. status = "Volume queued for rapid parity initialization";
  935. break;
  936. case CISS_LV_ENCRYPTED_NO_KEY:
  937. status = "Encrypted volume inaccessible - key not present";
  938. break;
  939. case CISS_LV_UNDERGOING_ENCRYPTION:
  940. status = "Volume undergoing encryption process";
  941. break;
  942. case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
  943. status = "Volume undergoing encryption re-keying process";
  944. break;
  945. case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
  946. status =
  947. "Encrypted volume inaccessible - disabled on ctrl";
  948. break;
  949. case CISS_LV_PENDING_ENCRYPTION:
  950. status = "Volume pending migration to encrypted state";
  951. break;
  952. case CISS_LV_PENDING_ENCRYPTION_REKEYING:
  953. status = "Volume pending encryption rekeying";
  954. break;
  955. case CISS_LV_NOT_SUPPORTED:
  956. status = "Volume not supported on this controller";
  957. break;
  958. case CISS_LV_STATUS_UNAVAILABLE:
  959. status = "Volume status not available";
  960. break;
  961. default:
  962. snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
  963. unknown_state_str, device->volume_status);
  964. status = unknown_state_buffer;
  965. break;
  966. }
  967. dev_info(&ctrl_info->pci_dev->dev,
  968. "scsi %d:%d:%d:%d %s\n",
  969. ctrl_info->scsi_host->host_no,
  970. device->bus, device->target, device->lun, status);
  971. }
  972. static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
  973. struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
  974. {
  975. struct pqi_scsi_dev *device;
  976. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  977. scsi_device_list_entry) {
  978. if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
  979. continue;
  980. if (pqi_is_logical_device(device))
  981. continue;
  982. if (device->aio_handle == aio_handle)
  983. return device;
  984. }
  985. return NULL;
  986. }
  987. static void pqi_update_logical_drive_queue_depth(
  988. struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
  989. {
  990. unsigned int i;
  991. struct raid_map *raid_map;
  992. struct raid_map_disk_data *disk_data;
  993. struct pqi_scsi_dev *phys_disk;
  994. unsigned int num_phys_disks;
  995. unsigned int num_raid_map_entries;
  996. unsigned int queue_depth;
  997. logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
  998. raid_map = logical_drive->raid_map;
  999. if (!raid_map)
  1000. return;
  1001. disk_data = raid_map->disk_data;
  1002. num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
  1003. (get_unaligned_le16(&raid_map->data_disks_per_row) +
  1004. get_unaligned_le16(&raid_map->metadata_disks_per_row));
  1005. num_raid_map_entries = num_phys_disks *
  1006. get_unaligned_le16(&raid_map->row_cnt);
  1007. queue_depth = 0;
  1008. for (i = 0; i < num_raid_map_entries; i++) {
  1009. phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
  1010. disk_data[i].aio_handle);
  1011. if (!phys_disk) {
  1012. dev_warn(&ctrl_info->pci_dev->dev,
  1013. "failed to find physical disk for logical drive %016llx\n",
  1014. get_unaligned_be64(logical_drive->scsi3addr));
  1015. logical_drive->offload_enabled = false;
  1016. logical_drive->offload_enabled_pending = false;
  1017. kfree(raid_map);
  1018. logical_drive->raid_map = NULL;
  1019. return;
  1020. }
  1021. queue_depth += phys_disk->queue_depth;
  1022. }
  1023. logical_drive->queue_depth = queue_depth;
  1024. }
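/*
 * Worked example for pqi_update_logical_drive_queue_depth() above, using
 * made-up map values: with layout_map_count = 1, data_disks_per_row = 3,
 * metadata_disks_per_row = 1 and row_cnt = 1, the RAID map describes
 * 1 * (3 + 1) = 4 physical disks and 4 * 1 = 4 map entries.  If each member
 * disk reported a queue depth of 64 via BMIC, the logical drive ends up
 * advertising 4 * 64 = 256.  If any entry's aio_handle cannot be matched to a
 * known physical disk, offload is disabled and the RAID map is discarded
 * instead.
 */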
  1025. static void pqi_update_all_logical_drive_queue_depths(
  1026. struct pqi_ctrl_info *ctrl_info)
  1027. {
  1028. struct pqi_scsi_dev *device;
  1029. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1030. scsi_device_list_entry) {
  1031. if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
  1032. continue;
  1033. if (!pqi_is_logical_device(device))
  1034. continue;
  1035. pqi_update_logical_drive_queue_depth(ctrl_info, device);
  1036. }
  1037. }
  1038. static void pqi_rescan_worker(struct work_struct *work)
  1039. {
  1040. struct pqi_ctrl_info *ctrl_info;
  1041. ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
  1042. rescan_work);
  1043. pqi_scan_scsi_devices(ctrl_info);
  1044. }
  1045. static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
  1046. struct pqi_scsi_dev *device)
  1047. {
  1048. int rc;
  1049. if (pqi_is_logical_device(device))
  1050. rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
  1051. device->target, device->lun);
  1052. else
  1053. rc = pqi_add_sas_device(ctrl_info->sas_host, device);
  1054. return rc;
  1055. }
  1056. static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
  1057. struct pqi_scsi_dev *device)
  1058. {
  1059. if (pqi_is_logical_device(device))
  1060. scsi_remove_device(device->sdev);
  1061. else
  1062. pqi_remove_sas_device(device);
  1063. }
  1064. /* Assumes the SCSI device list lock is held. */
  1065. static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
  1066. int bus, int target, int lun)
  1067. {
  1068. struct pqi_scsi_dev *device;
  1069. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1070. scsi_device_list_entry)
  1071. if (device->bus == bus && device->target == target &&
  1072. device->lun == lun)
  1073. return device;
  1074. return NULL;
  1075. }
  1076. static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
  1077. struct pqi_scsi_dev *dev2)
  1078. {
  1079. if (dev1->is_physical_device != dev2->is_physical_device)
  1080. return false;
  1081. if (dev1->is_physical_device)
  1082. return dev1->wwid == dev2->wwid;
  1083. return memcmp(dev1->volume_id, dev2->volume_id,
  1084. sizeof(dev1->volume_id)) == 0;
  1085. }
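/*
 * Device identity is two-level here: pqi_scsi_find_entry() below first matches
 * the SCSI3 address, then pqi_device_equal() above confirms it is still the
 * same backing device - by WWID for physical devices and by the volume_id
 * bytes for logical volumes.  A mismatch at the second level produces
 * DEVICE_CHANGED (as does a matching device whose volume has gone offline).
 */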
  1086. enum pqi_find_result {
  1087. DEVICE_NOT_FOUND,
  1088. DEVICE_CHANGED,
  1089. DEVICE_SAME,
  1090. };
  1091. static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
  1092. struct pqi_scsi_dev *device_to_find,
  1093. struct pqi_scsi_dev **matching_device)
  1094. {
  1095. struct pqi_scsi_dev *device;
  1096. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1097. scsi_device_list_entry) {
  1098. if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
  1099. device->scsi3addr)) {
  1100. *matching_device = device;
  1101. if (pqi_device_equal(device_to_find, device)) {
  1102. if (device_to_find->volume_offline)
  1103. return DEVICE_CHANGED;
  1104. return DEVICE_SAME;
  1105. }
  1106. return DEVICE_CHANGED;
  1107. }
  1108. }
  1109. return DEVICE_NOT_FOUND;
  1110. }
  1111. static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
  1112. char *action, struct pqi_scsi_dev *device)
  1113. {
  1114. dev_info(&ctrl_info->pci_dev->dev,
  1115. "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
  1116. action,
  1117. ctrl_info->scsi_host->host_no,
  1118. device->bus,
  1119. device->target,
  1120. device->lun,
  1121. scsi_device_type(device->devtype),
  1122. device->vendor,
  1123. device->model,
  1124. pqi_raid_level_to_string(device->raid_level),
  1125. device->offload_configured ? '+' : '-',
  1126. device->offload_enabled_pending ? '+' : '-',
  1127. device->expose_device ? '+' : '-',
  1128. device->queue_depth);
  1129. }
  1130. /* Assumes the SCSI device list lock is held. */
  1131. static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
  1132. struct pqi_scsi_dev *new_device)
  1133. {
  1134. existing_device->devtype = new_device->devtype;
  1135. existing_device->device_type = new_device->device_type;
  1136. existing_device->bus = new_device->bus;
  1137. if (new_device->target_lun_valid) {
  1138. existing_device->target = new_device->target;
  1139. existing_device->lun = new_device->lun;
  1140. existing_device->target_lun_valid = true;
  1141. }
  1142. /* By definition, the scsi3addr and wwid fields are already the same. */
  1143. existing_device->is_physical_device = new_device->is_physical_device;
  1144. existing_device->expose_device = new_device->expose_device;
  1145. existing_device->no_uld_attach = new_device->no_uld_attach;
  1146. existing_device->aio_enabled = new_device->aio_enabled;
  1147. memcpy(existing_device->vendor, new_device->vendor,
  1148. sizeof(existing_device->vendor));
  1149. memcpy(existing_device->model, new_device->model,
  1150. sizeof(existing_device->model));
  1151. existing_device->sas_address = new_device->sas_address;
  1152. existing_device->raid_level = new_device->raid_level;
  1153. existing_device->queue_depth = new_device->queue_depth;
  1154. existing_device->aio_handle = new_device->aio_handle;
  1155. existing_device->volume_status = new_device->volume_status;
  1156. existing_device->active_path_index = new_device->active_path_index;
  1157. existing_device->path_map = new_device->path_map;
  1158. existing_device->bay = new_device->bay;
  1159. memcpy(existing_device->box, new_device->box,
  1160. sizeof(existing_device->box));
  1161. memcpy(existing_device->phys_connector, new_device->phys_connector,
  1162. sizeof(existing_device->phys_connector));
  1163. existing_device->offload_configured = new_device->offload_configured;
  1164. existing_device->offload_enabled = false;
  1165. existing_device->offload_enabled_pending =
  1166. new_device->offload_enabled_pending;
  1167. existing_device->offload_to_mirror = 0;
  1168. kfree(existing_device->raid_map);
  1169. existing_device->raid_map = new_device->raid_map;
	/*
	 * Prevent the RAID map from being freed when new_device is cleaned
	 * up; existing_device now owns it.
	 */
	new_device->raid_map = NULL;
  1172. }
  1173. static inline void pqi_free_device(struct pqi_scsi_dev *device)
  1174. {
  1175. if (device) {
  1176. kfree(device->raid_map);
  1177. kfree(device);
  1178. }
  1179. }
/*
 * Called when exposing a new device to the OS fails, so that our internal
 * SCSI device list can be re-adjusted to match the SCSI midlayer's view.
 */
  1184. static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
  1185. struct pqi_scsi_dev *device)
  1186. {
  1187. unsigned long flags;
  1188. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  1189. list_del(&device->scsi_device_list_entry);
  1190. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  1191. /* Allow the device structure to be freed later. */
  1192. device->keep_device = false;
  1193. }
  1194. static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
  1195. struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
  1196. {
  1197. int rc;
  1198. unsigned int i;
  1199. unsigned long flags;
  1200. enum pqi_find_result find_result;
  1201. struct pqi_scsi_dev *device;
  1202. struct pqi_scsi_dev *next;
  1203. struct pqi_scsi_dev *matching_device;
  1204. struct list_head add_list;
  1205. struct list_head delete_list;
  1206. INIT_LIST_HEAD(&add_list);
  1207. INIT_LIST_HEAD(&delete_list);
  1208. /*
  1209. * The idea here is to do as little work as possible while holding the
  1210. * spinlock. That's why we go to great pains to defer anything other
  1211. * than updating the internal device list until after we release the
  1212. * spinlock.
  1213. */
  1214. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  1215. /* Assume that all devices in the existing list have gone away. */
  1216. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1217. scsi_device_list_entry)
  1218. device->device_gone = true;
  1219. for (i = 0; i < num_new_devices; i++) {
  1220. device = new_device_list[i];
  1221. find_result = pqi_scsi_find_entry(ctrl_info, device,
  1222. &matching_device);
  1223. switch (find_result) {
  1224. case DEVICE_SAME:
  1225. /*
  1226. * The newly found device is already in the existing
  1227. * device list.
  1228. */
  1229. device->new_device = false;
  1230. matching_device->device_gone = false;
  1231. pqi_scsi_update_device(matching_device, device);
  1232. break;
  1233. case DEVICE_NOT_FOUND:
  1234. /*
  1235. * The newly found device is NOT in the existing device
  1236. * list.
  1237. */
  1238. device->new_device = true;
  1239. break;
  1240. case DEVICE_CHANGED:
  1241. /*
  1242. * The original device has gone away and we need to add
  1243. * the new device.
  1244. */
  1245. device->new_device = true;
  1246. break;
  1247. default:
  1248. WARN_ON(find_result);
  1249. break;
  1250. }
  1251. }
  1252. /* Process all devices that have gone away. */
  1253. list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
  1254. scsi_device_list_entry) {
  1255. if (device->device_gone) {
  1256. list_del(&device->scsi_device_list_entry);
  1257. list_add_tail(&device->delete_list_entry, &delete_list);
  1258. }
  1259. }
  1260. /* Process all new devices. */
  1261. for (i = 0; i < num_new_devices; i++) {
  1262. device = new_device_list[i];
  1263. if (!device->new_device)
  1264. continue;
  1265. if (device->volume_offline)
  1266. continue;
  1267. list_add_tail(&device->scsi_device_list_entry,
  1268. &ctrl_info->scsi_device_list);
  1269. list_add_tail(&device->add_list_entry, &add_list);
  1270. /* To prevent this device structure from being freed later. */
  1271. device->keep_device = true;
  1272. }
  1273. pqi_update_all_logical_drive_queue_depths(ctrl_info);
  1274. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1275. scsi_device_list_entry)
  1276. device->offload_enabled =
  1277. device->offload_enabled_pending;
  1278. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  1279. /* Remove all devices that have gone away. */
  1280. list_for_each_entry_safe(device, next, &delete_list,
  1281. delete_list_entry) {
  1282. if (device->sdev)
  1283. pqi_remove_device(ctrl_info, device);
  1284. if (device->volume_offline) {
  1285. pqi_dev_info(ctrl_info, "offline", device);
  1286. pqi_show_volume_status(ctrl_info, device);
  1287. } else {
  1288. pqi_dev_info(ctrl_info, "removed", device);
  1289. }
  1290. list_del(&device->delete_list_entry);
  1291. pqi_free_device(device);
  1292. }
  1293. /*
  1294. * Notify the SCSI ML if the queue depth of any existing device has
  1295. * changed.
  1296. */
  1297. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1298. scsi_device_list_entry) {
  1299. if (device->sdev && device->queue_depth !=
  1300. device->advertised_queue_depth) {
  1301. device->advertised_queue_depth = device->queue_depth;
  1302. scsi_change_queue_depth(device->sdev,
  1303. device->advertised_queue_depth);
  1304. }
  1305. }
  1306. /* Expose any new devices. */
  1307. list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
  1308. if (device->expose_device && !device->sdev) {
  1309. rc = pqi_add_device(ctrl_info, device);
  1310. if (rc) {
  1311. dev_warn(&ctrl_info->pci_dev->dev,
  1312. "scsi %d:%d:%d:%d addition failed, device not added\n",
  1313. ctrl_info->scsi_host->host_no,
  1314. device->bus, device->target,
  1315. device->lun);
  1316. pqi_fixup_botched_add(ctrl_info, device);
  1317. continue;
  1318. }
  1319. }
  1320. pqi_dev_info(ctrl_info, "added", device);
  1321. }
  1322. }
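/*
 * pqi_update_device_list() above is a mark-and-sweep reconciliation: under the
 * device-list spinlock every existing entry is marked device_gone, the new
 * list is matched against it (DEVICE_SAME clears the mark and refreshes the
 * entry, DEVICE_NOT_FOUND/DEVICE_CHANGED flag an addition), and unmatched
 * entries are moved to a private delete list.  Everything that must sleep -
 * removing and adding SCSI devices, logging, and notifying the midlayer of
 * queue-depth changes - is deferred until the spinlock has been dropped.
 */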
  1323. static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
  1324. {
  1325. bool is_supported = false;
  1326. switch (device->devtype) {
  1327. case TYPE_DISK:
  1328. case TYPE_ZBC:
  1329. case TYPE_TAPE:
  1330. case TYPE_MEDIUM_CHANGER:
  1331. case TYPE_ENCLOSURE:
  1332. is_supported = true;
  1333. break;
  1334. case TYPE_RAID:
  1335. /*
  1336. * Only support the HBA controller itself as a RAID
  1337. * controller. If it's a RAID controller other than
  1338. * the HBA itself (an external RAID controller, MSA500
  1339. * or similar), we don't support it.
  1340. */
  1341. if (pqi_is_hba_lunid(device->scsi3addr))
  1342. is_supported = true;
  1343. break;
  1344. }
  1345. return is_supported;
  1346. }
  1347. static inline bool pqi_skip_device(u8 *scsi3addr,
  1348. struct report_phys_lun_extended_entry *phys_lun_ext_entry)
  1349. {
  1350. u8 device_flags;
  1351. if (!MASKED_DEVICE(scsi3addr))
  1352. return false;
  1353. /* The device is masked. */
  1354. device_flags = phys_lun_ext_entry->device_flags;
  1355. if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
  1356. /*
  1357. * It's a non-disk device. We ignore all devices of this type
  1358. * when they're masked.
  1359. */
  1360. return true;
  1361. }
  1362. return false;
  1363. }
  1364. static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
  1365. {
  1366. /* Expose all devices except for physical devices that are masked. */
  1367. if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
  1368. return false;
  1369. return true;
  1370. }
  1371. static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
  1372. {
  1373. int i;
  1374. int rc;
  1375. struct list_head new_device_list_head;
  1376. struct report_phys_lun_extended *physdev_list = NULL;
  1377. struct report_log_lun_extended *logdev_list = NULL;
  1378. struct report_phys_lun_extended_entry *phys_lun_ext_entry;
  1379. struct report_log_lun_extended_entry *log_lun_ext_entry;
  1380. struct bmic_identify_physical_device *id_phys = NULL;
  1381. u32 num_physicals;
  1382. u32 num_logicals;
  1383. struct pqi_scsi_dev **new_device_list = NULL;
  1384. struct pqi_scsi_dev *device;
  1385. struct pqi_scsi_dev *next;
  1386. unsigned int num_new_devices;
  1387. unsigned int num_valid_devices;
  1388. bool is_physical_device;
  1389. u8 *scsi3addr;
  1390. static char *out_of_memory_msg =
  1391. "out of memory, device discovery stopped";
  1392. INIT_LIST_HEAD(&new_device_list_head);
  1393. rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
  1394. if (rc)
  1395. goto out;
  1396. if (physdev_list)
  1397. num_physicals =
  1398. get_unaligned_be32(&physdev_list->header.list_length)
  1399. / sizeof(physdev_list->lun_entries[0]);
  1400. else
  1401. num_physicals = 0;
  1402. if (logdev_list)
  1403. num_logicals =
  1404. get_unaligned_be32(&logdev_list->header.list_length)
  1405. / sizeof(logdev_list->lun_entries[0]);
  1406. else
  1407. num_logicals = 0;
  1408. if (num_physicals) {
  1409. /*
  1410. * We need this buffer for calls to pqi_get_physical_disk_info()
  1411. * below. We allocate it here instead of inside
  1412. * pqi_get_physical_disk_info() because it's a fairly large
  1413. * buffer.
  1414. */
  1415. id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
  1416. if (!id_phys) {
  1417. dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
  1418. out_of_memory_msg);
  1419. rc = -ENOMEM;
  1420. goto out;
  1421. }
  1422. }
  1423. num_new_devices = num_physicals + num_logicals;
  1424. new_device_list = kmalloc(sizeof(*new_device_list) *
  1425. num_new_devices, GFP_KERNEL);
  1426. if (!new_device_list) {
  1427. dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
  1428. rc = -ENOMEM;
  1429. goto out;
  1430. }
  1431. for (i = 0; i < num_new_devices; i++) {
  1432. device = kzalloc(sizeof(*device), GFP_KERNEL);
  1433. if (!device) {
  1434. dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
  1435. out_of_memory_msg);
  1436. rc = -ENOMEM;
  1437. goto out;
  1438. }
  1439. list_add_tail(&device->new_device_list_entry,
  1440. &new_device_list_head);
  1441. }
  1442. device = NULL;
  1443. num_valid_devices = 0;
  1444. for (i = 0; i < num_new_devices; i++) {
  1445. if (i < num_physicals) {
  1446. is_physical_device = true;
  1447. phys_lun_ext_entry = &physdev_list->lun_entries[i];
  1448. log_lun_ext_entry = NULL;
  1449. scsi3addr = phys_lun_ext_entry->lunid;
  1450. } else {
  1451. is_physical_device = false;
  1452. phys_lun_ext_entry = NULL;
  1453. log_lun_ext_entry =
  1454. &logdev_list->lun_entries[i - num_physicals];
  1455. scsi3addr = log_lun_ext_entry->lunid;
  1456. }
  1457. if (is_physical_device &&
  1458. pqi_skip_device(scsi3addr, phys_lun_ext_entry))
  1459. continue;
  1460. if (device)
  1461. device = list_next_entry(device, new_device_list_entry);
  1462. else
  1463. device = list_first_entry(&new_device_list_head,
  1464. struct pqi_scsi_dev, new_device_list_entry);
  1465. memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
  1466. device->is_physical_device = is_physical_device;
  1467. device->raid_level = SA_RAID_UNKNOWN;
  1468. /* Gather information about the device. */
  1469. rc = pqi_get_device_info(ctrl_info, device);
  1470. if (rc == -ENOMEM) {
  1471. dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
  1472. out_of_memory_msg);
  1473. goto out;
  1474. }
  1475. if (rc) {
  1476. dev_warn(&ctrl_info->pci_dev->dev,
  1477. "obtaining device info failed, skipping device %016llx\n",
  1478. get_unaligned_be64(device->scsi3addr));
  1479. rc = 0;
  1480. continue;
  1481. }
  1482. if (!pqi_is_supported_device(device))
  1483. continue;
  1484. pqi_assign_bus_target_lun(device);
  1485. device->expose_device = pqi_expose_device(device);
  1486. if (device->is_physical_device) {
  1487. device->wwid = phys_lun_ext_entry->wwid;
  1488. if ((phys_lun_ext_entry->device_flags &
  1489. REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
  1490. phys_lun_ext_entry->aio_handle)
  1491. device->aio_enabled = true;
  1492. } else {
  1493. memcpy(device->volume_id, log_lun_ext_entry->volume_id,
  1494. sizeof(device->volume_id));
  1495. }
  1496. switch (device->devtype) {
  1497. case TYPE_DISK:
  1498. case TYPE_ZBC:
  1499. case TYPE_ENCLOSURE:
  1500. if (device->is_physical_device) {
  1501. device->sas_address =
  1502. get_unaligned_be64(&device->wwid);
  1503. if (device->devtype == TYPE_DISK ||
  1504. device->devtype == TYPE_ZBC) {
  1505. device->aio_handle =
  1506. phys_lun_ext_entry->aio_handle;
  1507. pqi_get_physical_disk_info(ctrl_info,
  1508. device, id_phys);
  1509. }
  1510. }
  1511. break;
  1512. }
  1513. new_device_list[num_valid_devices++] = device;
  1514. }
  1515. pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
  1516. out:
  1517. list_for_each_entry_safe(device, next, &new_device_list_head,
  1518. new_device_list_entry) {
  1519. if (device->keep_device)
  1520. continue;
  1521. list_del(&device->new_device_list_entry);
  1522. pqi_free_device(device);
  1523. }
  1524. kfree(new_device_list);
  1525. kfree(physdev_list);
  1526. kfree(logdev_list);
  1527. kfree(id_phys);
  1528. return rc;
  1529. }
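/*
 * Discovery flow in pqi_update_scsi_devices() above: fetch the extended
 * physical and logical LUN reports, pre-allocate one pqi_scsi_dev per reported
 * LUN, then walk the physical entries first and the logical entries second,
 * filling in each structure via INQUIRY/BMIC before handing the valid ones to
 * pqi_update_device_list().  The out: path frees every structure that was not
 * marked keep_device, so a mid-loop failure abandons the scan without leaking
 * the temporary structures.
 */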
  1530. static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
  1531. {
  1532. unsigned long flags;
  1533. struct pqi_scsi_dev *device;
  1534. struct pqi_scsi_dev *next;
  1535. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  1536. list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
  1537. scsi_device_list_entry) {
  1538. if (device->sdev)
  1539. pqi_remove_device(ctrl_info, device);
  1540. list_del(&device->scsi_device_list_entry);
  1541. pqi_free_device(device);
  1542. }
  1543. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  1544. }
  1545. static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
  1546. {
  1547. int rc;
  1548. if (pqi_ctrl_offline(ctrl_info))
  1549. return -ENXIO;
  1550. mutex_lock(&ctrl_info->scan_mutex);
  1551. rc = pqi_update_scsi_devices(ctrl_info);
  1552. if (rc)
  1553. pqi_schedule_rescan_worker(ctrl_info);
  1554. mutex_unlock(&ctrl_info->scan_mutex);
  1555. return rc;
  1556. }
  1557. static void pqi_scan_start(struct Scsi_Host *shost)
  1558. {
  1559. pqi_scan_scsi_devices(shost_to_hba(shost));
  1560. }
  1561. /* Returns TRUE if scan is finished. */
  1562. static int pqi_scan_finished(struct Scsi_Host *shost,
  1563. unsigned long elapsed_time)
  1564. {
  1565. struct pqi_ctrl_info *ctrl_info;
  1566. ctrl_info = shost_priv(shost);
  1567. return !mutex_is_locked(&ctrl_info->scan_mutex);
  1568. }
  1569. static inline void pqi_set_encryption_info(
  1570. struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
  1571. u64 first_block)
  1572. {
  1573. u32 volume_blk_size;
  1574. /*
  1575. * Set the encryption tweak values based on logical block address.
  1576. * If the block size is 512, the tweak value is equal to the LBA.
  1577. * For other block sizes, tweak value is (LBA * block size) / 512.
  1578. */
  1579. volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
  1580. if (volume_blk_size != 512)
  1581. first_block = (first_block * volume_blk_size) / 512;
  1582. encryption_info->data_encryption_key_index =
  1583. get_unaligned_le16(&raid_map->data_encryption_key_index);
  1584. encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
  1585. encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
  1586. }
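/*
 * Illustrative sketch (not part of the driver, excluded from the build) of the
 * tweak scaling done by pqi_set_encryption_info() above.  The helper name and
 * values are made up; it only shows the arithmetic for a volume whose block
 * size is not 512.
 */
#if 0
static u64 pqi_example_encryption_tweak(u32 volume_blk_size, u64 first_block)
{
	/* e.g. LBA 100 on a 4096-byte-block volume: (100 * 4096) / 512 = 800 */
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;
	return first_block;	/* the driver stores the lower/upper 32 bits */
}
#endif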
  1587. /*
  1588. * Attempt to perform offload RAID mapping for a logical volume I/O.
  1589. */
  1590. #define PQI_RAID_BYPASS_INELIGIBLE 1
  1591. static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
  1592. struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
  1593. struct pqi_queue_group *queue_group)
  1594. {
  1595. struct raid_map *raid_map;
  1596. bool is_write = false;
  1597. u32 map_index;
  1598. u64 first_block;
  1599. u64 last_block;
  1600. u32 block_cnt;
  1601. u32 blocks_per_row;
  1602. u64 first_row;
  1603. u64 last_row;
  1604. u32 first_row_offset;
  1605. u32 last_row_offset;
  1606. u32 first_column;
  1607. u32 last_column;
  1608. u64 r0_first_row;
  1609. u64 r0_last_row;
  1610. u32 r5or6_blocks_per_row;
  1611. u64 r5or6_first_row;
  1612. u64 r5or6_last_row;
  1613. u32 r5or6_first_row_offset;
  1614. u32 r5or6_last_row_offset;
  1615. u32 r5or6_first_column;
  1616. u32 r5or6_last_column;
  1617. u16 data_disks_per_row;
  1618. u32 total_disks_per_row;
  1619. u16 layout_map_count;
  1620. u32 stripesize;
  1621. u16 strip_size;
  1622. u32 first_group;
  1623. u32 last_group;
  1624. u32 current_group;
  1625. u32 map_row;
  1626. u32 aio_handle;
  1627. u64 disk_block;
  1628. u32 disk_block_cnt;
  1629. u8 cdb[16];
  1630. u8 cdb_length;
  1631. int offload_to_mirror;
  1632. struct pqi_encryption_info *encryption_info_ptr;
  1633. struct pqi_encryption_info encryption_info;
  1634. #if BITS_PER_LONG == 32
  1635. u64 tmpdiv;
  1636. #endif
  1637. /* Check for valid opcode, get LBA and block count. */
  1638. switch (scmd->cmnd[0]) {
  1639. case WRITE_6:
  1640. is_write = true;
  1641. /* fall through */
  1642. case READ_6:
  1643. first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
  1644. (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
  1645. block_cnt = (u32)scmd->cmnd[4];
  1646. if (block_cnt == 0)
  1647. block_cnt = 256;
  1648. break;
  1649. case WRITE_10:
  1650. is_write = true;
  1651. /* fall through */
  1652. case READ_10:
  1653. first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
  1654. block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
  1655. break;
  1656. case WRITE_12:
  1657. is_write = true;
  1658. /* fall through */
  1659. case READ_12:
  1660. first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
  1661. block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
  1662. break;
  1663. case WRITE_16:
  1664. is_write = true;
  1665. /* fall through */
  1666. case READ_16:
  1667. first_block = get_unaligned_be64(&scmd->cmnd[2]);
  1668. block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
  1669. break;
  1670. default:
  1671. /* Process via normal I/O path. */
  1672. return PQI_RAID_BYPASS_INELIGIBLE;
  1673. }
  1674. /* Check for write to non-RAID-0. */
  1675. if (is_write && device->raid_level != SA_RAID_0)
  1676. return PQI_RAID_BYPASS_INELIGIBLE;
  1677. if (unlikely(block_cnt == 0))
  1678. return PQI_RAID_BYPASS_INELIGIBLE;
  1679. last_block = first_block + block_cnt - 1;
  1680. raid_map = device->raid_map;
  1681. /* Check for invalid block or wraparound. */
  1682. if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
  1683. last_block < first_block)
  1684. return PQI_RAID_BYPASS_INELIGIBLE;
  1685. data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
  1686. strip_size = get_unaligned_le16(&raid_map->strip_size);
  1687. layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
  1688. /* Calculate stripe information for the request. */
  1689. blocks_per_row = data_disks_per_row * strip_size;
  1690. #if BITS_PER_LONG == 32
  1691. tmpdiv = first_block;
  1692. do_div(tmpdiv, blocks_per_row);
  1693. first_row = tmpdiv;
  1694. tmpdiv = last_block;
  1695. do_div(tmpdiv, blocks_per_row);
  1696. last_row = tmpdiv;
  1697. first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
  1698. last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
  1699. tmpdiv = first_row_offset;
  1700. do_div(tmpdiv, strip_size);
  1701. first_column = tmpdiv;
  1702. tmpdiv = last_row_offset;
  1703. do_div(tmpdiv, strip_size);
  1704. last_column = tmpdiv;
  1705. #else
  1706. first_row = first_block / blocks_per_row;
  1707. last_row = last_block / blocks_per_row;
  1708. first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
  1709. last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
  1710. first_column = first_row_offset / strip_size;
  1711. last_column = last_row_offset / strip_size;
  1712. #endif
  1713. /* If this isn't a single row/column then give to the controller. */
  1714. if (first_row != last_row || first_column != last_column)
  1715. return PQI_RAID_BYPASS_INELIGIBLE;
  1716. /* Proceeding with driver mapping. */
  1717. total_disks_per_row = data_disks_per_row +
  1718. get_unaligned_le16(&raid_map->metadata_disks_per_row);
  1719. map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
  1720. get_unaligned_le16(&raid_map->row_cnt);
  1721. map_index = (map_row * total_disks_per_row) + first_column;
  1722. /* RAID 1 */
  1723. if (device->raid_level == SA_RAID_1) {
  1724. if (device->offload_to_mirror)
  1725. map_index += data_disks_per_row;
  1726. device->offload_to_mirror = !device->offload_to_mirror;
  1727. } else if (device->raid_level == SA_RAID_ADM) {
  1728. /* RAID ADM */
  1729. /*
  1730. * Handles N-way mirrors (R1-ADM) and R10 with # of drives
  1731. * divisible by 3.
  1732. */
  1733. offload_to_mirror = device->offload_to_mirror;
  1734. if (offload_to_mirror == 0) {
  1735. /* use physical disk in the first mirrored group. */
  1736. map_index %= data_disks_per_row;
  1737. } else {
  1738. do {
  1739. /*
  1740. * Determine mirror group that map_index
  1741. * indicates.
  1742. */
  1743. current_group = map_index / data_disks_per_row;
  1744. if (offload_to_mirror != current_group) {
  1745. if (current_group <
  1746. layout_map_count - 1) {
  1747. /*
  1748. * Select raid index from
  1749. * next group.
  1750. */
  1751. map_index += data_disks_per_row;
  1752. current_group++;
  1753. } else {
  1754. /*
  1755. * Select raid index from first
  1756. * group.
  1757. */
  1758. map_index %= data_disks_per_row;
  1759. current_group = 0;
  1760. }
  1761. }
  1762. } while (offload_to_mirror != current_group);
  1763. }
  1764. /* Set mirror group to use next time. */
  1765. offload_to_mirror =
  1766. (offload_to_mirror >= layout_map_count - 1) ?
  1767. 0 : offload_to_mirror + 1;
  1768. WARN_ON(offload_to_mirror >= layout_map_count);
  1769. device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid using device->offload_to_mirror directly for the rest
		 * of this function: multiple threads may increment it
		 * concurrently and push it past layout_map_count - 1.
		 */
  1775. } else if ((device->raid_level == SA_RAID_5 ||
  1776. device->raid_level == SA_RAID_6) && layout_map_count > 1) {
  1777. /* RAID 50/60 */
  1778. /* Verify first and last block are in same RAID group */
  1779. r5or6_blocks_per_row = strip_size * data_disks_per_row;
  1780. stripesize = r5or6_blocks_per_row * layout_map_count;
  1781. #if BITS_PER_LONG == 32
  1782. tmpdiv = first_block;
  1783. first_group = do_div(tmpdiv, stripesize);
  1784. tmpdiv = first_group;
  1785. do_div(tmpdiv, r5or6_blocks_per_row);
  1786. first_group = tmpdiv;
  1787. tmpdiv = last_block;
  1788. last_group = do_div(tmpdiv, stripesize);
  1789. tmpdiv = last_group;
  1790. do_div(tmpdiv, r5or6_blocks_per_row);
  1791. last_group = tmpdiv;
  1792. #else
  1793. first_group = (first_block % stripesize) / r5or6_blocks_per_row;
  1794. last_group = (last_block % stripesize) / r5or6_blocks_per_row;
  1795. #endif
  1796. if (first_group != last_group)
  1797. return PQI_RAID_BYPASS_INELIGIBLE;
  1798. /* Verify request is in a single row of RAID 5/6 */
  1799. #if BITS_PER_LONG == 32
  1800. tmpdiv = first_block;
  1801. do_div(tmpdiv, stripesize);
  1802. first_row = r5or6_first_row = r0_first_row = tmpdiv;
  1803. tmpdiv = last_block;
  1804. do_div(tmpdiv, stripesize);
  1805. r5or6_last_row = r0_last_row = tmpdiv;
  1806. #else
  1807. first_row = r5or6_first_row = r0_first_row =
  1808. first_block / stripesize;
  1809. r5or6_last_row = r0_last_row = last_block / stripesize;
  1810. #endif
  1811. if (r5or6_first_row != r5or6_last_row)
  1812. return PQI_RAID_BYPASS_INELIGIBLE;
  1813. /* Verify request is in a single column */
  1814. #if BITS_PER_LONG == 32
  1815. tmpdiv = first_block;
  1816. first_row_offset = do_div(tmpdiv, stripesize);
  1817. tmpdiv = first_row_offset;
  1818. first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
  1819. r5or6_first_row_offset = first_row_offset;
  1820. tmpdiv = last_block;
  1821. r5or6_last_row_offset = do_div(tmpdiv, stripesize);
  1822. tmpdiv = r5or6_last_row_offset;
  1823. r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
  1824. tmpdiv = r5or6_first_row_offset;
  1825. do_div(tmpdiv, strip_size);
  1826. first_column = r5or6_first_column = tmpdiv;
  1827. tmpdiv = r5or6_last_row_offset;
  1828. do_div(tmpdiv, strip_size);
  1829. r5or6_last_column = tmpdiv;
  1830. #else
  1831. first_row_offset = r5or6_first_row_offset =
  1832. (u32)((first_block % stripesize) %
  1833. r5or6_blocks_per_row);
  1834. r5or6_last_row_offset =
  1835. (u32)((last_block % stripesize) %
  1836. r5or6_blocks_per_row);
  1837. first_column = r5or6_first_row_offset / strip_size;
  1838. r5or6_first_column = first_column;
  1839. r5or6_last_column = r5or6_last_row_offset / strip_size;
  1840. #endif
  1841. if (r5or6_first_column != r5or6_last_column)
  1842. return PQI_RAID_BYPASS_INELIGIBLE;
  1843. /* Request is eligible */
  1844. map_row =
  1845. ((u32)(first_row >> raid_map->parity_rotation_shift)) %
  1846. get_unaligned_le16(&raid_map->row_cnt);
  1847. map_index = (first_group *
  1848. (get_unaligned_le16(&raid_map->row_cnt) *
  1849. total_disks_per_row)) +
  1850. (map_row * total_disks_per_row) + first_column;
  1851. }
  1852. if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
  1853. return PQI_RAID_BYPASS_INELIGIBLE;
  1854. aio_handle = raid_map->disk_data[map_index].aio_handle;
  1855. disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
  1856. first_row * strip_size +
  1857. (first_row_offset - first_column * strip_size);
  1858. disk_block_cnt = block_cnt;
  1859. /* Handle differing logical/physical block sizes. */
  1860. if (raid_map->phys_blk_shift) {
  1861. disk_block <<= raid_map->phys_blk_shift;
  1862. disk_block_cnt <<= raid_map->phys_blk_shift;
  1863. }
  1864. if (unlikely(disk_block_cnt > 0xffff))
  1865. return PQI_RAID_BYPASS_INELIGIBLE;
  1866. /* Build the new CDB for the physical disk I/O. */
  1867. if (disk_block > 0xffffffff) {
  1868. cdb[0] = is_write ? WRITE_16 : READ_16;
  1869. cdb[1] = 0;
  1870. put_unaligned_be64(disk_block, &cdb[2]);
  1871. put_unaligned_be32(disk_block_cnt, &cdb[10]);
  1872. cdb[14] = 0;
  1873. cdb[15] = 0;
  1874. cdb_length = 16;
  1875. } else {
  1876. cdb[0] = is_write ? WRITE_10 : READ_10;
  1877. cdb[1] = 0;
  1878. put_unaligned_be32((u32)disk_block, &cdb[2]);
  1879. cdb[6] = 0;
  1880. put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
  1881. cdb[9] = 0;
  1882. cdb_length = 10;
  1883. }
  1884. if (get_unaligned_le16(&raid_map->flags) &
  1885. RAID_MAP_ENCRYPTION_ENABLED) {
  1886. pqi_set_encryption_info(&encryption_info, raid_map,
  1887. first_block);
  1888. encryption_info_ptr = &encryption_info;
  1889. } else {
  1890. encryption_info_ptr = NULL;
  1891. }
  1892. return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
  1893. cdb, cdb_length, queue_group, encryption_info_ptr);
  1894. }
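/*
 * One concrete trace of the bypass math above, using made-up RAID 0 map
 * values: strip_size = 128, data_disks_per_row = 4, metadata_disks_per_row = 0,
 * row_cnt = 2, parity_rotation_shift = 0, phys_blk_shift = 0 and
 * disk_starting_blk = 0.  A READ(10) for first_block = 1000, block_cnt = 16
 * gives blocks_per_row = 512, first_row = last_row = 1,
 * first_row_offset = 488, last_row_offset = 503 and
 * first_column = last_column = 3, so the request stays within one strip and is
 * eligible.  map_row = 1 % 2 = 1, map_index = 1 * 4 + 3 = 7, and the physical
 * request targets disk_data[7] at disk_block = 1 * 128 + (488 - 3 * 128) = 232
 * for 16 blocks, built as a READ(10) CDB since both values fit.
 */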
  1895. #define PQI_STATUS_IDLE 0x0
  1896. #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
  1897. #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
  1898. #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
  1899. #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
  1900. #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
  1901. #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
  1902. #define PQI_DEVICE_STATE_ERROR 0x4
  1903. #define PQI_MODE_READY_TIMEOUT_SECS 30
  1904. #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
  1905. static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
  1906. {
  1907. struct pqi_device_registers __iomem *pqi_registers;
  1908. unsigned long timeout;
  1909. u64 signature;
  1910. u8 status;
  1911. pqi_registers = ctrl_info->pqi_registers;
  1912. timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
  1913. while (1) {
  1914. signature = readq(&pqi_registers->signature);
  1915. if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
  1916. sizeof(signature)) == 0)
  1917. break;
  1918. if (time_after(jiffies, timeout)) {
  1919. dev_err(&ctrl_info->pci_dev->dev,
  1920. "timed out waiting for PQI signature\n");
  1921. return -ETIMEDOUT;
  1922. }
  1923. msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
  1924. }
  1925. while (1) {
  1926. status = readb(&pqi_registers->function_and_status_code);
  1927. if (status == PQI_STATUS_IDLE)
  1928. break;
  1929. if (time_after(jiffies, timeout)) {
  1930. dev_err(&ctrl_info->pci_dev->dev,
  1931. "timed out waiting for PQI IDLE\n");
  1932. return -ETIMEDOUT;
  1933. }
  1934. msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
  1935. }
  1936. while (1) {
  1937. if (readl(&pqi_registers->device_status) ==
  1938. PQI_DEVICE_STATE_ALL_REGISTERS_READY)
  1939. break;
  1940. if (time_after(jiffies, timeout)) {
  1941. dev_err(&ctrl_info->pci_dev->dev,
  1942. "timed out waiting for PQI all registers ready\n");
  1943. return -ETIMEDOUT;
  1944. }
  1945. msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
  1946. }
  1947. return 0;
  1948. }
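/*
 * The three polling loops above share a single deadline: the PQI signature,
 * the IDLE function-and-status code and the ALL_REGISTERS_READY device state
 * must all appear within PQI_MODE_READY_TIMEOUT_SECS of entry, with the
 * registers re-read every PQI_MODE_READY_POLL_INTERVAL_MSECS.  Whichever stage
 * is still pending when the deadline passes produces the corresponding
 * "timed out" message and -ETIMEDOUT.
 */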
  1949. static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
  1950. {
  1951. struct pqi_scsi_dev *device;
  1952. device = io_request->scmd->device->hostdata;
  1953. device->offload_enabled = false;
  1954. }
  1955. static inline void pqi_take_device_offline(struct scsi_device *sdev)
  1956. {
  1957. struct pqi_ctrl_info *ctrl_info;
  1958. struct pqi_scsi_dev *device;
  1959. if (scsi_device_online(sdev)) {
  1960. scsi_device_set_state(sdev, SDEV_OFFLINE);
  1961. ctrl_info = shost_to_hba(sdev->host);
  1962. schedule_delayed_work(&ctrl_info->rescan_work, 0);
  1963. device = sdev->hostdata;
  1964. dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
  1965. ctrl_info->scsi_host->host_no, device->bus,
  1966. device->target, device->lun);
  1967. }
  1968. }
  1969. static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
  1970. {
  1971. u8 scsi_status;
  1972. u8 host_byte;
  1973. struct scsi_cmnd *scmd;
  1974. struct pqi_raid_error_info *error_info;
  1975. size_t sense_data_length;
  1976. int residual_count;
  1977. int xfer_count;
  1978. struct scsi_sense_hdr sshdr;
  1979. scmd = io_request->scmd;
  1980. if (!scmd)
  1981. return;
  1982. error_info = io_request->error_info;
  1983. scsi_status = error_info->status;
  1984. host_byte = DID_OK;
  1985. if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
  1986. xfer_count =
  1987. get_unaligned_le32(&error_info->data_out_transferred);
  1988. residual_count = scsi_bufflen(scmd) - xfer_count;
  1989. scsi_set_resid(scmd, residual_count);
  1990. if (xfer_count < scmd->underflow)
  1991. host_byte = DID_SOFT_ERROR;
  1992. }
  1993. sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
  1994. if (sense_data_length == 0)
  1995. sense_data_length =
  1996. get_unaligned_le16(&error_info->response_data_length);
  1997. if (sense_data_length) {
  1998. if (sense_data_length > sizeof(error_info->data))
  1999. sense_data_length = sizeof(error_info->data);
  2000. if (scsi_status == SAM_STAT_CHECK_CONDITION &&
  2001. scsi_normalize_sense(error_info->data,
  2002. sense_data_length, &sshdr) &&
  2003. sshdr.sense_key == HARDWARE_ERROR &&
  2004. sshdr.asc == 0x3e &&
  2005. sshdr.ascq == 0x1) {
  2006. pqi_take_device_offline(scmd->device);
  2007. host_byte = DID_NO_CONNECT;
  2008. }
  2009. if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
  2010. sense_data_length = SCSI_SENSE_BUFFERSIZE;
  2011. memcpy(scmd->sense_buffer, error_info->data,
  2012. sense_data_length);
  2013. }
  2014. scmd->result = scsi_status;
  2015. set_host_byte(scmd, host_byte);
  2016. }
  2017. static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
  2018. {
  2019. u8 scsi_status;
  2020. u8 host_byte;
  2021. struct scsi_cmnd *scmd;
  2022. struct pqi_aio_error_info *error_info;
  2023. size_t sense_data_length;
  2024. int residual_count;
  2025. int xfer_count;
  2026. bool device_offline;
  2027. scmd = io_request->scmd;
  2028. error_info = io_request->error_info;
  2029. host_byte = DID_OK;
  2030. sense_data_length = 0;
  2031. device_offline = false;
  2032. switch (error_info->service_response) {
  2033. case PQI_AIO_SERV_RESPONSE_COMPLETE:
  2034. scsi_status = error_info->status;
  2035. break;
  2036. case PQI_AIO_SERV_RESPONSE_FAILURE:
  2037. switch (error_info->status) {
  2038. case PQI_AIO_STATUS_IO_ABORTED:
  2039. scsi_status = SAM_STAT_TASK_ABORTED;
  2040. break;
  2041. case PQI_AIO_STATUS_UNDERRUN:
  2042. scsi_status = SAM_STAT_GOOD;
  2043. residual_count = get_unaligned_le32(
  2044. &error_info->residual_count);
  2045. scsi_set_resid(scmd, residual_count);
  2046. xfer_count = scsi_bufflen(scmd) - residual_count;
  2047. if (xfer_count < scmd->underflow)
  2048. host_byte = DID_SOFT_ERROR;
  2049. break;
  2050. case PQI_AIO_STATUS_OVERRUN:
  2051. scsi_status = SAM_STAT_GOOD;
  2052. break;
  2053. case PQI_AIO_STATUS_AIO_PATH_DISABLED:
  2054. pqi_aio_path_disabled(io_request);
  2055. scsi_status = SAM_STAT_GOOD;
  2056. io_request->status = -EAGAIN;
  2057. break;
  2058. case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
  2059. case PQI_AIO_STATUS_INVALID_DEVICE:
  2060. device_offline = true;
  2061. pqi_take_device_offline(scmd->device);
  2062. host_byte = DID_NO_CONNECT;
  2063. scsi_status = SAM_STAT_CHECK_CONDITION;
  2064. break;
  2065. case PQI_AIO_STATUS_IO_ERROR:
  2066. default:
  2067. scsi_status = SAM_STAT_CHECK_CONDITION;
  2068. break;
  2069. }
  2070. break;
  2071. case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
  2072. case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
  2073. scsi_status = SAM_STAT_GOOD;
  2074. break;
  2075. case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
  2076. case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
  2077. default:
  2078. scsi_status = SAM_STAT_CHECK_CONDITION;
  2079. break;
  2080. }
  2081. if (error_info->data_present) {
  2082. sense_data_length =
  2083. get_unaligned_le16(&error_info->data_length);
  2084. if (sense_data_length) {
  2085. if (sense_data_length > sizeof(error_info->data))
  2086. sense_data_length = sizeof(error_info->data);
  2087. if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
  2088. sense_data_length = SCSI_SENSE_BUFFERSIZE;
  2089. memcpy(scmd->sense_buffer, error_info->data,
  2090. sense_data_length);
  2091. }
  2092. }
  2093. if (device_offline && sense_data_length == 0)
  2094. scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
  2095. 0x3e, 0x1);
  2096. scmd->result = scsi_status;
  2097. set_host_byte(scmd, host_byte);
  2098. }
  2099. static void pqi_process_io_error(unsigned int iu_type,
  2100. struct pqi_io_request *io_request)
  2101. {
  2102. switch (iu_type) {
  2103. case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
  2104. pqi_process_raid_io_error(io_request);
  2105. break;
  2106. case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
  2107. pqi_process_aio_io_error(io_request);
  2108. break;
  2109. }
  2110. }
  2111. static int pqi_interpret_task_management_response(
  2112. struct pqi_task_management_response *response)
  2113. {
  2114. int rc;
  2115. switch (response->response_code) {
  2116. case SOP_TMF_COMPLETE:
  2117. case SOP_TMF_FUNCTION_SUCCEEDED:
  2118. rc = 0;
  2119. break;
  2120. default:
  2121. rc = -EIO;
  2122. break;
  2123. }
  2124. return rc;
  2125. }
  2126. static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
  2127. struct pqi_queue_group *queue_group)
  2128. {
  2129. unsigned int num_responses;
  2130. pqi_index_t oq_pi;
  2131. pqi_index_t oq_ci;
  2132. struct pqi_io_request *io_request;
  2133. struct pqi_io_response *response;
  2134. u16 request_id;
  2135. num_responses = 0;
  2136. oq_ci = queue_group->oq_ci_copy;
  2137. while (1) {
  2138. oq_pi = *queue_group->oq_pi;
  2139. if (oq_pi == oq_ci)
  2140. break;
  2141. num_responses++;
  2142. response = queue_group->oq_element_array +
  2143. (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
  2144. request_id = get_unaligned_le16(&response->request_id);
  2145. WARN_ON(request_id >= ctrl_info->max_io_slots);
  2146. io_request = &ctrl_info->io_request_pool[request_id];
  2147. WARN_ON(atomic_read(&io_request->refcount) == 0);
  2148. switch (response->header.iu_type) {
  2149. case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
  2150. case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
  2151. if (io_request->scmd)
  2152. io_request->scmd->result = 0;
  2153. /* fall through */
  2154. case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
  2155. break;
  2156. case PQI_RESPONSE_IU_TASK_MANAGEMENT:
  2157. io_request->status =
  2158. pqi_interpret_task_management_response(
  2159. (void *)response);
  2160. break;
  2161. case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
  2162. pqi_aio_path_disabled(io_request);
  2163. io_request->status = -EAGAIN;
  2164. break;
  2165. case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
  2166. case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
  2167. io_request->error_info = ctrl_info->error_buffer +
  2168. (get_unaligned_le16(&response->error_index) *
  2169. PQI_ERROR_BUFFER_ELEMENT_LENGTH);
  2170. pqi_process_io_error(response->header.iu_type,
  2171. io_request);
  2172. break;
  2173. default:
  2174. dev_err(&ctrl_info->pci_dev->dev,
  2175. "unexpected IU type: 0x%x\n",
  2176. response->header.iu_type);
  2177. WARN_ON(response->header.iu_type);
  2178. break;
  2179. }
  2180. io_request->io_complete_callback(io_request,
  2181. io_request->context);
  2182. /*
  2183. * Note that the I/O request structure CANNOT BE TOUCHED after
  2184. * returning from the I/O completion callback!
  2185. */
  2186. oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
  2187. }
  2188. if (num_responses) {
  2189. queue_group->oq_ci_copy = oq_ci;
  2190. writel(oq_ci, queue_group->oq_ci);
  2191. }
  2192. return num_responses;
  2193. }
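/*
 * Response draining above: the consumer index is advanced in a local copy and
 * written back to the hardware register once per batch, after all responses
 * between the producer and consumer indices have been dispatched.  Each
 * response carries a request_id that indexes io_request_pool, and the two
 * *_IO_ERROR IU types also carry an error_index that locates the matching
 * element in the error buffer.
 */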
  2194. static inline unsigned int pqi_num_elements_free(unsigned int pi,
  2195. unsigned int ci, unsigned int elements_in_queue)
  2196. {
  2197. unsigned int num_elements_used;
  2198. if (pi >= ci)
  2199. num_elements_used = pi - ci;
  2200. else
  2201. num_elements_used = elements_in_queue - ci + pi;
  2202. return elements_in_queue - num_elements_used - 1;
  2203. }
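/*
 * Minimal usage sketch (example only, excluded from the build) for
 * pqi_num_elements_free() above.  One slot is always left unused so that a
 * full queue can be distinguished from an empty one.
 */
#if 0
static unsigned int pqi_example_free_elements(void)
{
	/* 8-element queue, pi = 2, ci = 6: used = 8 - 6 + 2 = 4, free = 8 - 4 - 1 = 3 */
	return pqi_num_elements_free(2, 6, 8);
}
#endif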
  2204. #define PQI_EVENT_ACK_TIMEOUT 30
  2205. static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
  2206. struct pqi_event_acknowledge_request *iu, size_t iu_length)
  2207. {
  2208. pqi_index_t iq_pi;
  2209. pqi_index_t iq_ci;
  2210. unsigned long flags;
  2211. void *next_element;
  2212. unsigned long timeout;
  2213. struct pqi_queue_group *queue_group;
  2214. queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
  2215. put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
  2216. timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
  2217. while (1) {
  2218. spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
  2219. iq_pi = queue_group->iq_pi_copy[RAID_PATH];
  2220. iq_ci = *queue_group->iq_ci[RAID_PATH];
  2221. if (pqi_num_elements_free(iq_pi, iq_ci,
  2222. ctrl_info->num_elements_per_iq))
  2223. break;
  2224. spin_unlock_irqrestore(
  2225. &queue_group->submit_lock[RAID_PATH], flags);
  2226. if (time_after(jiffies, timeout)) {
  2227. dev_err(&ctrl_info->pci_dev->dev,
  2228. "sending event acknowledge timed out\n");
  2229. return;
  2230. }
  2231. }
  2232. next_element = queue_group->iq_element_array[RAID_PATH] +
  2233. (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  2234. memcpy(next_element, iu, iu_length);
  2235. iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
  2236. queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
  2237. /*
  2238. * This write notifies the controller that an IU is available to be
  2239. * processed.
  2240. */
  2241. writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
  2242. spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
  2243. }
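/*
 * pqi_start_event_ack() above posts the acknowledgement on the RAID path of
 * the default queue group: the submit lock is retaken on every poll, and once
 * a free element is found the lock stays held while the IU is copied in, the
 * producer index copy is advanced and the doorbell is written.  If no room
 * appears within PQI_EVENT_ACK_TIMEOUT (in seconds, scaled by HZ), the
 * acknowledgement is dropped after logging the timeout.
 */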
  2244. static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
  2245. struct pqi_event *event)
  2246. {
  2247. struct pqi_event_acknowledge_request request;
  2248. memset(&request, 0, sizeof(request));
  2249. request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
  2250. put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
  2251. &request.header.iu_length);
  2252. request.event_type = event->event_type;
  2253. request.event_id = event->event_id;
  2254. request.additional_event_id = event->additional_event_id;
  2255. pqi_start_event_ack(ctrl_info, &request, sizeof(request));
  2256. }
  2257. static void pqi_event_worker(struct work_struct *work)
  2258. {
  2259. unsigned int i;
  2260. struct pqi_ctrl_info *ctrl_info;
  2261. struct pqi_event *pending_event;
  2262. bool got_non_heartbeat_event = false;
  2263. ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
  2264. pending_event = ctrl_info->pending_events;
  2265. for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
  2266. if (pending_event->pending) {
  2267. pending_event->pending = false;
  2268. pqi_acknowledge_event(ctrl_info, pending_event);
  2269. if (i != PQI_EVENT_HEARTBEAT)
  2270. got_non_heartbeat_event = true;
  2271. }
  2272. pending_event++;
  2273. }
  2274. if (got_non_heartbeat_event)
  2275. pqi_schedule_rescan_worker(ctrl_info);
  2276. }
  2277. static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
  2278. {
  2279. unsigned int i;
  2280. unsigned int path;
  2281. struct pqi_queue_group *queue_group;
  2282. unsigned long flags;
  2283. struct pqi_io_request *io_request;
  2284. struct pqi_io_request *next;
  2285. struct scsi_cmnd *scmd;
  2286. ctrl_info->controller_online = false;
  2287. dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
  2288. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2289. queue_group = &ctrl_info->queue_groups[i];
  2290. for (path = 0; path < 2; path++) {
  2291. spin_lock_irqsave(
  2292. &queue_group->submit_lock[path], flags);
  2293. list_for_each_entry_safe(io_request, next,
  2294. &queue_group->request_list[path],
  2295. request_list_entry) {
  2296. scmd = io_request->scmd;
  2297. if (scmd) {
  2298. set_host_byte(scmd, DID_NO_CONNECT);
  2299. pqi_scsi_done(scmd);
  2300. }
  2301. list_del(&io_request->request_list_entry);
  2302. }
  2303. spin_unlock_irqrestore(
  2304. &queue_group->submit_lock[path], flags);
  2305. }
  2306. }
  2307. }
  2308. #define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
  2309. #define PQI_MAX_HEARTBEAT_REQUESTS 5
  2310. static void pqi_heartbeat_timer_handler(unsigned long data)
  2311. {
  2312. int num_interrupts;
  2313. struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
  2314. num_interrupts = atomic_read(&ctrl_info->num_interrupts);
  2315. if (num_interrupts == ctrl_info->previous_num_interrupts) {
  2316. ctrl_info->num_heartbeats_requested++;
  2317. if (ctrl_info->num_heartbeats_requested >
  2318. PQI_MAX_HEARTBEAT_REQUESTS) {
  2319. pqi_take_ctrl_offline(ctrl_info);
  2320. return;
  2321. }
  2322. ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
  2323. schedule_work(&ctrl_info->event_work);
  2324. } else {
  2325. ctrl_info->num_heartbeats_requested = 0;
  2326. }
  2327. ctrl_info->previous_num_interrupts = num_interrupts;
  2328. mod_timer(&ctrl_info->heartbeat_timer,
  2329. jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
  2330. }
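/*
 * Heartbeat logic above: every PQI_HEARTBEAT_TIMER_INTERVAL (5 seconds) the
 * interrupt count is compared with the value saved on the previous tick.  If
 * it has not moved, the heartbeat event is marked pending so the event worker
 * sends an acknowledgement, prompting the controller to respond; after more
 * than PQI_MAX_HEARTBEAT_REQUESTS consecutive stalled ticks (roughly 30
 * seconds) the controller is declared offline.  Any observed interrupt
 * progress resets the counter.
 */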
  2331. static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
  2332. {
  2333. ctrl_info->previous_num_interrupts =
  2334. atomic_read(&ctrl_info->num_interrupts);
  2335. init_timer(&ctrl_info->heartbeat_timer);
  2336. ctrl_info->heartbeat_timer.expires =
  2337. jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
  2338. ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
  2339. ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
  2340. add_timer(&ctrl_info->heartbeat_timer);
  2341. ctrl_info->heartbeat_timer_started = true;
  2342. }
  2343. static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
  2344. {
  2345. if (ctrl_info->heartbeat_timer_started)
  2346. del_timer_sync(&ctrl_info->heartbeat_timer);
  2347. }
  2348. static int pqi_event_type_to_event_index(unsigned int event_type)
  2349. {
  2350. int index;
  2351. switch (event_type) {
  2352. case PQI_EVENT_TYPE_HEARTBEAT:
  2353. index = PQI_EVENT_HEARTBEAT;
  2354. break;
  2355. case PQI_EVENT_TYPE_HOTPLUG:
  2356. index = PQI_EVENT_HOTPLUG;
  2357. break;
  2358. case PQI_EVENT_TYPE_HARDWARE:
  2359. index = PQI_EVENT_HARDWARE;
  2360. break;
  2361. case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
  2362. index = PQI_EVENT_PHYSICAL_DEVICE;
  2363. break;
  2364. case PQI_EVENT_TYPE_LOGICAL_DEVICE:
  2365. index = PQI_EVENT_LOGICAL_DEVICE;
  2366. break;
  2367. case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
  2368. index = PQI_EVENT_AIO_STATE_CHANGE;
  2369. break;
  2370. case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
  2371. index = PQI_EVENT_AIO_CONFIG_CHANGE;
  2372. break;
  2373. default:
  2374. index = -1;
  2375. break;
  2376. }
  2377. return index;
  2378. }
  2379. static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
  2380. {
  2381. unsigned int num_events;
  2382. pqi_index_t oq_pi;
  2383. pqi_index_t oq_ci;
  2384. struct pqi_event_queue *event_queue;
  2385. struct pqi_event_response *response;
  2386. struct pqi_event *pending_event;
  2387. bool need_delayed_work;
  2388. int event_index;
  2389. event_queue = &ctrl_info->event_queue;
  2390. num_events = 0;
  2391. need_delayed_work = false;
  2392. oq_ci = event_queue->oq_ci_copy;
  2393. while (1) {
  2394. oq_pi = *event_queue->oq_pi;
  2395. if (oq_pi == oq_ci)
  2396. break;
  2397. num_events++;
  2398. response = event_queue->oq_element_array +
  2399. (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
  2400. event_index =
  2401. pqi_event_type_to_event_index(response->event_type);
  2402. if (event_index >= 0) {
  2403. if (response->request_acknowlege) {
  2404. pending_event =
  2405. &ctrl_info->pending_events[event_index];
  2406. pending_event->event_type =
  2407. response->event_type;
  2408. pending_event->event_id = response->event_id;
  2409. pending_event->additional_event_id =
  2410. response->additional_event_id;
  2411. if (event_index != PQI_EVENT_HEARTBEAT) {
  2412. pending_event->pending = true;
  2413. need_delayed_work = true;
  2414. }
  2415. }
  2416. }
  2417. oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
  2418. }
  2419. if (num_events) {
  2420. event_queue->oq_ci_copy = oq_ci;
  2421. writel(oq_ci, event_queue->oq_ci);
  2422. if (need_delayed_work)
  2423. schedule_work(&ctrl_info->event_work);
  2424. }
  2425. return num_events;
  2426. }
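/*
 * Per-queue-group MSI-X handler: process I/O responses for this queue
 * group, additionally drain the event queue if this vector is the event
 * IRQ (shared with queue group 0), bump the interrupt counter consumed by
 * the heartbeat timer, and restart submission of any requests still queued
 * on either path.
 */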
  2427. static irqreturn_t pqi_irq_handler(int irq, void *data)
  2428. {
  2429. struct pqi_ctrl_info *ctrl_info;
  2430. struct pqi_queue_group *queue_group;
  2431. unsigned int num_responses_handled;
  2432. queue_group = data;
  2433. ctrl_info = queue_group->ctrl_info;
  2434. if (!ctrl_info || !queue_group->oq_ci)
  2435. return IRQ_NONE;
  2436. num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
  2437. if (irq == ctrl_info->event_irq)
  2438. num_responses_handled += pqi_process_event_intr(ctrl_info);
  2439. if (num_responses_handled)
  2440. atomic_inc(&ctrl_info->num_interrupts);
  2441. pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
  2442. pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
  2443. return IRQ_HANDLED;
  2444. }
  2445. static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
  2446. {
  2447. int i;
  2448. int rc;
  2449. ctrl_info->event_irq = ctrl_info->msix_vectors[0];
  2450. for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
  2451. rc = request_irq(ctrl_info->msix_vectors[i],
  2452. pqi_irq_handler, 0,
  2453. DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
  2454. if (rc) {
  2455. dev_err(&ctrl_info->pci_dev->dev,
  2456. "irq %u init failed with error %d\n",
  2457. ctrl_info->msix_vectors[i], rc);
  2458. return rc;
  2459. }
  2460. ctrl_info->num_msix_vectors_initialized++;
  2461. }
  2462. return 0;
  2463. }
  2464. static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
  2465. {
  2466. int i;
  2467. for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
  2468. free_irq(ctrl_info->msix_vectors[i],
  2469. ctrl_info->intr_data[i]);
  2470. }
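/*
 * Enable one MSI-X vector per queue group (bounded below by
 * PQI_MIN_MSIX_VECTORS) and record each vector number along with its
 * per-vector interrupt data (the owning queue group) for request_irq().
 */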
  2471. static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
  2472. {
  2473. unsigned int i;
  2474. int max_vectors;
  2475. int num_vectors_enabled;
  2476. struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
  2477. max_vectors = ctrl_info->num_queue_groups;
  2478. for (i = 0; i < max_vectors; i++)
  2479. msix_entries[i].entry = i;
  2480. num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
  2481. msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
  2482. if (num_vectors_enabled < 0) {
  2483. dev_err(&ctrl_info->pci_dev->dev,
  2484. "MSI-X init failed with error %d\n",
  2485. num_vectors_enabled);
  2486. return num_vectors_enabled;
  2487. }
  2488. ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
  2489. for (i = 0; i < num_vectors_enabled; i++) {
  2490. ctrl_info->msix_vectors[i] = msix_entries[i].vector;
  2491. ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
  2492. }
  2493. return 0;
  2494. }
  2495. static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
  2496. {
  2497. int i;
  2498. int rc;
  2499. int cpu;
  2500. cpu = cpumask_first(cpu_online_mask);
  2501. for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
  2502. rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
  2503. get_cpu_mask(cpu));
  2504. if (rc)
  2505. dev_err(&ctrl_info->pci_dev->dev,
  2506. "error %d setting affinity hint for irq vector %u\n",
  2507. rc, ctrl_info->msix_vectors[i]);
  2508. cpu = cpumask_next(cpu, cpu_online_mask);
  2509. }
  2510. }
  2511. static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
  2512. {
  2513. int i;
  2514. for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
  2515. irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
  2516. }
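/*
 * All operational queue memory comes from one coherent DMA allocation.
 * A first pass walks a NULL-based pointer through the element arrays (two
 * IQs and one OQ per queue group, plus the event OQ) and the queue indexes,
 * applying the required alignment at each step, to size the allocation.
 * A second pass over the real buffer hands out the aligned chunks and
 * records both CPU and bus addresses in each queue group.
 */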
  2517. static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
  2518. {
  2519. unsigned int i;
  2520. size_t alloc_length;
  2521. size_t element_array_length_per_iq;
  2522. size_t element_array_length_per_oq;
  2523. void *element_array;
  2524. void *next_queue_index;
  2525. void *aligned_pointer;
  2526. unsigned int num_inbound_queues;
  2527. unsigned int num_outbound_queues;
  2528. unsigned int num_queue_indexes;
  2529. struct pqi_queue_group *queue_group;
  2530. element_array_length_per_iq =
  2531. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
  2532. ctrl_info->num_elements_per_iq;
  2533. element_array_length_per_oq =
  2534. PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
  2535. ctrl_info->num_elements_per_oq;
  2536. num_inbound_queues = ctrl_info->num_queue_groups * 2;
  2537. num_outbound_queues = ctrl_info->num_queue_groups;
  2538. num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
  2539. aligned_pointer = NULL;
  2540. for (i = 0; i < num_inbound_queues; i++) {
  2541. aligned_pointer = PTR_ALIGN(aligned_pointer,
  2542. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2543. aligned_pointer += element_array_length_per_iq;
  2544. }
  2545. for (i = 0; i < num_outbound_queues; i++) {
  2546. aligned_pointer = PTR_ALIGN(aligned_pointer,
  2547. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2548. aligned_pointer += element_array_length_per_oq;
  2549. }
  2550. aligned_pointer = PTR_ALIGN(aligned_pointer,
  2551. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2552. aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
  2553. PQI_EVENT_OQ_ELEMENT_LENGTH;
  2554. for (i = 0; i < num_queue_indexes; i++) {
  2555. aligned_pointer = PTR_ALIGN(aligned_pointer,
  2556. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2557. aligned_pointer += sizeof(pqi_index_t);
  2558. }
  2559. alloc_length = (size_t)aligned_pointer +
  2560. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
  2561. ctrl_info->queue_memory_base =
  2562. dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
  2563. alloc_length,
  2564. &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
  2565. if (!ctrl_info->queue_memory_base) {
  2566. dev_err(&ctrl_info->pci_dev->dev,
  2567. "failed to allocate memory for PQI admin queues\n");
  2568. return -ENOMEM;
  2569. }
  2570. ctrl_info->queue_memory_length = alloc_length;
  2571. element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
  2572. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2573. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2574. queue_group = &ctrl_info->queue_groups[i];
  2575. queue_group->iq_element_array[RAID_PATH] = element_array;
  2576. queue_group->iq_element_array_bus_addr[RAID_PATH] =
  2577. ctrl_info->queue_memory_base_dma_handle +
  2578. (element_array - ctrl_info->queue_memory_base);
  2579. element_array += element_array_length_per_iq;
  2580. element_array = PTR_ALIGN(element_array,
  2581. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2582. queue_group->iq_element_array[AIO_PATH] = element_array;
  2583. queue_group->iq_element_array_bus_addr[AIO_PATH] =
  2584. ctrl_info->queue_memory_base_dma_handle +
  2585. (element_array - ctrl_info->queue_memory_base);
  2586. element_array += element_array_length_per_iq;
  2587. element_array = PTR_ALIGN(element_array,
  2588. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2589. }
  2590. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2591. queue_group = &ctrl_info->queue_groups[i];
  2592. queue_group->oq_element_array = element_array;
  2593. queue_group->oq_element_array_bus_addr =
  2594. ctrl_info->queue_memory_base_dma_handle +
  2595. (element_array - ctrl_info->queue_memory_base);
  2596. element_array += element_array_length_per_oq;
  2597. element_array = PTR_ALIGN(element_array,
  2598. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2599. }
  2600. ctrl_info->event_queue.oq_element_array = element_array;
  2601. ctrl_info->event_queue.oq_element_array_bus_addr =
  2602. ctrl_info->queue_memory_base_dma_handle +
  2603. (element_array - ctrl_info->queue_memory_base);
  2604. element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
  2605. PQI_EVENT_OQ_ELEMENT_LENGTH;
  2606. next_queue_index = PTR_ALIGN(element_array,
  2607. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2608. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2609. queue_group = &ctrl_info->queue_groups[i];
  2610. queue_group->iq_ci[RAID_PATH] = next_queue_index;
  2611. queue_group->iq_ci_bus_addr[RAID_PATH] =
  2612. ctrl_info->queue_memory_base_dma_handle +
  2613. (next_queue_index - ctrl_info->queue_memory_base);
  2614. next_queue_index += sizeof(pqi_index_t);
  2615. next_queue_index = PTR_ALIGN(next_queue_index,
  2616. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2617. queue_group->iq_ci[AIO_PATH] = next_queue_index;
  2618. queue_group->iq_ci_bus_addr[AIO_PATH] =
  2619. ctrl_info->queue_memory_base_dma_handle +
  2620. (next_queue_index - ctrl_info->queue_memory_base);
  2621. next_queue_index += sizeof(pqi_index_t);
  2622. next_queue_index = PTR_ALIGN(next_queue_index,
  2623. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2624. queue_group->oq_pi = next_queue_index;
  2625. queue_group->oq_pi_bus_addr =
  2626. ctrl_info->queue_memory_base_dma_handle +
  2627. (next_queue_index - ctrl_info->queue_memory_base);
  2628. next_queue_index += sizeof(pqi_index_t);
  2629. next_queue_index = PTR_ALIGN(next_queue_index,
  2630. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2631. }
  2632. ctrl_info->event_queue.oq_pi = next_queue_index;
  2633. ctrl_info->event_queue.oq_pi_bus_addr =
  2634. ctrl_info->queue_memory_base_dma_handle +
  2635. (next_queue_index - ctrl_info->queue_memory_base);
  2636. return 0;
  2637. }
  2638. static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
  2639. {
  2640. unsigned int i;
  2641. u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
  2642. u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
  2643. /*
  2644. * Initialize the backpointers to the controller structure in
  2645. * each operational queue group structure.
  2646. */
  2647. for (i = 0; i < ctrl_info->num_queue_groups; i++)
  2648. ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
  2649. /*
  2650. * Assign IDs to all operational queues. Note that the IDs
  2651. * assigned to operational IQs are independent of the IDs
  2652. * assigned to operational OQs.
  2653. */
  2654. ctrl_info->event_queue.oq_id = next_oq_id++;
  2655. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2656. ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
  2657. ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
  2658. ctrl_info->queue_groups[i].oq_id = next_oq_id++;
  2659. }
  2660. /*
  2661. * Assign MSI-X table entry indexes to all queues. Note that the
  2662. * interrupt for the event queue is shared with the first queue group.
  2663. */
  2664. ctrl_info->event_queue.int_msg_num = 0;
  2665. for (i = 0; i < ctrl_info->num_queue_groups; i++)
  2666. ctrl_info->queue_groups[i].int_msg_num = i;
  2667. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2668. spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
  2669. spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
  2670. INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
  2671. INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
  2672. }
  2673. }
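/*
 * The admin queue pair gets its own coherent allocation: struct
 * pqi_admin_queues_aligned lays out the IQ/OQ element arrays and the
 * iq_ci/oq_pi indexes, and both CPU and bus addresses are derived from the
 * aligned base of that allocation.
 */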
  2674. static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
  2675. {
  2676. size_t alloc_length;
  2677. struct pqi_admin_queues_aligned *admin_queues_aligned;
  2678. struct pqi_admin_queues *admin_queues;
  2679. alloc_length = sizeof(struct pqi_admin_queues_aligned) +
  2680. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
  2681. ctrl_info->admin_queue_memory_base =
  2682. dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
  2683. alloc_length,
  2684. &ctrl_info->admin_queue_memory_base_dma_handle,
  2685. GFP_KERNEL);
  2686. if (!ctrl_info->admin_queue_memory_base)
  2687. return -ENOMEM;
  2688. ctrl_info->admin_queue_memory_length = alloc_length;
  2689. admin_queues = &ctrl_info->admin_queues;
  2690. admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
  2691. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2692. admin_queues->iq_element_array =
  2693. &admin_queues_aligned->iq_element_array;
  2694. admin_queues->oq_element_array =
  2695. &admin_queues_aligned->oq_element_array;
  2696. admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
  2697. admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
  2698. admin_queues->iq_element_array_bus_addr =
  2699. ctrl_info->admin_queue_memory_base_dma_handle +
  2700. (admin_queues->iq_element_array -
  2701. ctrl_info->admin_queue_memory_base);
  2702. admin_queues->oq_element_array_bus_addr =
  2703. ctrl_info->admin_queue_memory_base_dma_handle +
  2704. (admin_queues->oq_element_array -
  2705. ctrl_info->admin_queue_memory_base);
  2706. admin_queues->iq_ci_bus_addr =
  2707. ctrl_info->admin_queue_memory_base_dma_handle +
  2708. ((void *)admin_queues->iq_ci -
  2709. ctrl_info->admin_queue_memory_base);
  2710. admin_queues->oq_pi_bus_addr =
  2711. ctrl_info->admin_queue_memory_base_dma_handle +
  2712. ((void *)admin_queues->oq_pi -
  2713. ctrl_info->admin_queue_memory_base);
  2714. return 0;
  2715. }
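/*
 * Creating the admin queue pair is a register-level handshake: the element
 * array and index addresses are written to the PQI device registers, the
 * "create admin queue pair" function code is issued, and the function and
 * status register is polled until the controller reports idle (or the
 * timeout expires).  Only then are the iq_pi/oq_ci doorbell offsets valid.
 */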
  2716. #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
  2717. #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
  2718. static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
  2719. {
  2720. struct pqi_device_registers __iomem *pqi_registers;
  2721. struct pqi_admin_queues *admin_queues;
  2722. unsigned long timeout;
  2723. u8 status;
  2724. u32 reg;
  2725. pqi_registers = ctrl_info->pqi_registers;
  2726. admin_queues = &ctrl_info->admin_queues;
  2727. writeq((u64)admin_queues->iq_element_array_bus_addr,
  2728. &pqi_registers->admin_iq_element_array_addr);
  2729. writeq((u64)admin_queues->oq_element_array_bus_addr,
  2730. &pqi_registers->admin_oq_element_array_addr);
  2731. writeq((u64)admin_queues->iq_ci_bus_addr,
  2732. &pqi_registers->admin_iq_ci_addr);
  2733. writeq((u64)admin_queues->oq_pi_bus_addr,
  2734. &pqi_registers->admin_oq_pi_addr);
  2735. reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
2736. (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
  2737. (admin_queues->int_msg_num << 16);
  2738. writel(reg, &pqi_registers->admin_iq_num_elements);
  2739. writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
  2740. &pqi_registers->function_and_status_code);
  2741. timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
  2742. while (1) {
  2743. status = readb(&pqi_registers->function_and_status_code);
  2744. if (status == PQI_STATUS_IDLE)
  2745. break;
  2746. if (time_after(jiffies, timeout))
  2747. return -ETIMEDOUT;
  2748. msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
  2749. }
  2750. /*
  2751. * The offset registers are not initialized to the correct
  2752. * offsets until *after* the create admin queue pair command
  2753. * completes successfully.
  2754. */
  2755. admin_queues->iq_pi = ctrl_info->iomem_base +
  2756. PQI_DEVICE_REGISTERS_OFFSET +
  2757. readq(&pqi_registers->admin_iq_pi_offset);
  2758. admin_queues->oq_ci = ctrl_info->iomem_base +
  2759. PQI_DEVICE_REGISTERS_OFFSET +
  2760. readq(&pqi_registers->admin_oq_ci_offset);
  2761. return 0;
  2762. }
  2763. static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
  2764. struct pqi_general_admin_request *request)
  2765. {
  2766. struct pqi_admin_queues *admin_queues;
  2767. void *next_element;
  2768. pqi_index_t iq_pi;
  2769. admin_queues = &ctrl_info->admin_queues;
  2770. iq_pi = admin_queues->iq_pi_copy;
  2771. next_element = admin_queues->iq_element_array +
  2772. (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
  2773. memcpy(next_element, request, sizeof(*request));
  2774. iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
  2775. admin_queues->iq_pi_copy = iq_pi;
  2776. /*
  2777. * This write notifies the controller that an IU is available to be
  2778. * processed.
  2779. */
  2780. writel(iq_pi, admin_queues->iq_pi);
  2781. }
  2782. static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
  2783. struct pqi_general_admin_response *response)
  2784. {
  2785. struct pqi_admin_queues *admin_queues;
  2786. pqi_index_t oq_pi;
  2787. pqi_index_t oq_ci;
  2788. unsigned long timeout;
  2789. admin_queues = &ctrl_info->admin_queues;
  2790. oq_ci = admin_queues->oq_ci_copy;
  2791. timeout = (3 * HZ) + jiffies;
  2792. while (1) {
  2793. oq_pi = *admin_queues->oq_pi;
  2794. if (oq_pi != oq_ci)
  2795. break;
  2796. if (time_after(jiffies, timeout)) {
  2797. dev_err(&ctrl_info->pci_dev->dev,
  2798. "timed out waiting for admin response\n");
  2799. return -ETIMEDOUT;
  2800. }
  2801. usleep_range(1000, 2000);
  2802. }
  2803. memcpy(response, admin_queues->oq_element_array +
  2804. (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
  2805. oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
  2806. admin_queues->oq_ci_copy = oq_ci;
  2807. writel(oq_ci, admin_queues->oq_ci);
  2808. return 0;
  2809. }
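/*
 * pqi_start_io(): append the new IU (if any) to the per-path request list,
 * then copy as many queued IUs as currently fit into the inbound element
 * array.  IUs larger than one element span consecutive elements and wrap
 * around the end of the queue when necessary.  A single doorbell write of
 * the new producer index publishes everything that was copied; requests
 * that do not fit stay on the list and are retried from the IRQ handler.
 */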
  2810. static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
  2811. struct pqi_queue_group *queue_group, enum pqi_io_path path,
  2812. struct pqi_io_request *io_request)
  2813. {
  2814. struct pqi_io_request *next;
  2815. void *next_element;
  2816. pqi_index_t iq_pi;
  2817. pqi_index_t iq_ci;
  2818. size_t iu_length;
  2819. unsigned long flags;
  2820. unsigned int num_elements_needed;
  2821. unsigned int num_elements_to_end_of_queue;
  2822. size_t copy_count;
  2823. struct pqi_iu_header *request;
  2824. spin_lock_irqsave(&queue_group->submit_lock[path], flags);
  2825. if (io_request)
  2826. list_add_tail(&io_request->request_list_entry,
  2827. &queue_group->request_list[path]);
  2828. iq_pi = queue_group->iq_pi_copy[path];
  2829. list_for_each_entry_safe(io_request, next,
  2830. &queue_group->request_list[path], request_list_entry) {
  2831. request = io_request->iu;
  2832. iu_length = get_unaligned_le16(&request->iu_length) +
  2833. PQI_REQUEST_HEADER_LENGTH;
  2834. num_elements_needed =
  2835. DIV_ROUND_UP(iu_length,
  2836. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  2837. iq_ci = *queue_group->iq_ci[path];
  2838. if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
  2839. ctrl_info->num_elements_per_iq))
  2840. break;
  2841. put_unaligned_le16(queue_group->oq_id,
  2842. &request->response_queue_id);
  2843. next_element = queue_group->iq_element_array[path] +
  2844. (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  2845. num_elements_to_end_of_queue =
  2846. ctrl_info->num_elements_per_iq - iq_pi;
  2847. if (num_elements_needed <= num_elements_to_end_of_queue) {
  2848. memcpy(next_element, request, iu_length);
  2849. } else {
  2850. copy_count = num_elements_to_end_of_queue *
  2851. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
  2852. memcpy(next_element, request, copy_count);
  2853. memcpy(queue_group->iq_element_array[path],
  2854. (u8 *)request + copy_count,
  2855. iu_length - copy_count);
  2856. }
  2857. iq_pi = (iq_pi + num_elements_needed) %
  2858. ctrl_info->num_elements_per_iq;
  2859. list_del(&io_request->request_list_entry);
  2860. }
  2861. if (iq_pi != queue_group->iq_pi_copy[path]) {
  2862. queue_group->iq_pi_copy[path] = iq_pi;
  2863. /*
  2864. * This write notifies the controller that one or more IUs are
  2865. * available to be processed.
  2866. */
  2867. writel(iq_pi, queue_group->iq_pi[path]);
  2868. }
  2869. spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
  2870. }
  2871. static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
  2872. void *context)
  2873. {
  2874. struct completion *waiting = context;
  2875. complete(waiting);
  2876. }
  2877. static int pqi_submit_raid_request_synchronous_with_io_request(
  2878. struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
  2879. unsigned long timeout_msecs)
  2880. {
  2881. int rc = 0;
  2882. DECLARE_COMPLETION_ONSTACK(wait);
  2883. io_request->io_complete_callback = pqi_raid_synchronous_complete;
  2884. io_request->context = &wait;
  2885. pqi_start_io(ctrl_info,
  2886. &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
  2887. io_request);
  2888. if (timeout_msecs == NO_TIMEOUT) {
  2889. wait_for_completion_io(&wait);
  2890. } else {
  2891. if (!wait_for_completion_io_timeout(&wait,
  2892. msecs_to_jiffies(timeout_msecs))) {
  2893. dev_warn(&ctrl_info->pci_dev->dev,
  2894. "command timed out\n");
  2895. rc = -ETIMEDOUT;
  2896. }
  2897. }
  2898. return rc;
  2899. }
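/*
 * Synchronous RAID-path submission: callers are serialized on
 * sync_request_sem, the IU is copied into a pooled io_request, submitted on
 * the default queue group's RAID path, and the caller sleeps on a
 * completion until the response (or timeout) arrives.  Error information is
 * copied back when the caller supplies a buffer; otherwise a CHECK
 * CONDITION that is only a data underflow is treated as success.
 *
 * Typical use, as seen elsewhere in this file:
 *
 *	rc = pqi_submit_raid_request_synchronous(ctrl_info,
 *		&request.header, 0, NULL, NO_TIMEOUT);
 */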
  2900. static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
  2901. struct pqi_iu_header *request, unsigned int flags,
  2902. struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
  2903. {
  2904. int rc;
  2905. struct pqi_io_request *io_request;
  2906. unsigned long start_jiffies;
  2907. unsigned long msecs_blocked;
  2908. size_t iu_length;
  2909. /*
  2910. * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
  2911. * are mutually exclusive.
  2912. */
  2913. if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
  2914. if (down_interruptible(&ctrl_info->sync_request_sem))
  2915. return -ERESTARTSYS;
  2916. } else {
  2917. if (timeout_msecs == NO_TIMEOUT) {
  2918. down(&ctrl_info->sync_request_sem);
  2919. } else {
  2920. start_jiffies = jiffies;
  2921. if (down_timeout(&ctrl_info->sync_request_sem,
  2922. msecs_to_jiffies(timeout_msecs)))
  2923. return -ETIMEDOUT;
  2924. msecs_blocked =
  2925. jiffies_to_msecs(jiffies - start_jiffies);
  2926. if (msecs_blocked >= timeout_msecs)
  2927. return -ETIMEDOUT;
  2928. timeout_msecs -= msecs_blocked;
  2929. }
  2930. }
  2931. io_request = pqi_alloc_io_request(ctrl_info);
  2932. put_unaligned_le16(io_request->index,
  2933. &(((struct pqi_raid_path_request *)request)->request_id));
  2934. if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
  2935. ((struct pqi_raid_path_request *)request)->error_index =
  2936. ((struct pqi_raid_path_request *)request)->request_id;
  2937. iu_length = get_unaligned_le16(&request->iu_length) +
  2938. PQI_REQUEST_HEADER_LENGTH;
  2939. memcpy(io_request->iu, request, iu_length);
  2940. rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
  2941. io_request, timeout_msecs);
  2942. if (error_info) {
  2943. if (io_request->error_info)
  2944. memcpy(error_info, io_request->error_info,
  2945. sizeof(*error_info));
  2946. else
  2947. memset(error_info, 0, sizeof(*error_info));
  2948. } else if (rc == 0 && io_request->error_info) {
  2949. u8 scsi_status;
  2950. struct pqi_raid_error_info *raid_error_info;
  2951. raid_error_info = io_request->error_info;
  2952. scsi_status = raid_error_info->status;
  2953. if (scsi_status == SAM_STAT_CHECK_CONDITION &&
  2954. raid_error_info->data_out_result ==
  2955. PQI_DATA_IN_OUT_UNDERFLOW)
  2956. scsi_status = SAM_STAT_GOOD;
  2957. if (scsi_status != SAM_STAT_GOOD)
  2958. rc = -EIO;
  2959. }
  2960. pqi_free_io_request(io_request);
  2961. up(&ctrl_info->sync_request_sem);
  2962. return rc;
  2963. }
  2964. static int pqi_validate_admin_response(
  2965. struct pqi_general_admin_response *response, u8 expected_function_code)
  2966. {
  2967. if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
  2968. return -EINVAL;
  2969. if (get_unaligned_le16(&response->header.iu_length) !=
  2970. PQI_GENERAL_ADMIN_IU_LENGTH)
  2971. return -EINVAL;
  2972. if (response->function_code != expected_function_code)
  2973. return -EINVAL;
  2974. if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
  2975. return -EINVAL;
  2976. return 0;
  2977. }
  2978. static int pqi_submit_admin_request_synchronous(
  2979. struct pqi_ctrl_info *ctrl_info,
  2980. struct pqi_general_admin_request *request,
  2981. struct pqi_general_admin_response *response)
  2982. {
  2983. int rc;
  2984. pqi_submit_admin_request(ctrl_info, request);
  2985. rc = pqi_poll_for_admin_response(ctrl_info, response);
  2986. if (rc == 0)
  2987. rc = pqi_validate_admin_response(response,
  2988. request->function_code);
  2989. return rc;
  2990. }
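/*
 * REPORT DEVICE CAPABILITY: a general admin request with a DMA-mapped
 * capability buffer.  The controller's queue counts, element lengths
 * (reported in 16-byte units, hence the "* 16"), maximum inbound IU length
 * and spanning support are cached in ctrl_info for later queue sizing.
 */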
  2991. static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
  2992. {
  2993. int rc;
  2994. struct pqi_general_admin_request request;
  2995. struct pqi_general_admin_response response;
  2996. struct pqi_device_capability *capability;
  2997. struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
  2998. capability = kmalloc(sizeof(*capability), GFP_KERNEL);
  2999. if (!capability)
  3000. return -ENOMEM;
  3001. memset(&request, 0, sizeof(request));
  3002. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3003. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3004. &request.header.iu_length);
  3005. request.function_code =
  3006. PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
  3007. put_unaligned_le32(sizeof(*capability),
  3008. &request.data.report_device_capability.buffer_length);
  3009. rc = pqi_map_single(ctrl_info->pci_dev,
  3010. &request.data.report_device_capability.sg_descriptor,
  3011. capability, sizeof(*capability),
  3012. PCI_DMA_FROMDEVICE);
  3013. if (rc)
  3014. goto out;
  3015. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3016. &response);
  3017. pqi_pci_unmap(ctrl_info->pci_dev,
  3018. &request.data.report_device_capability.sg_descriptor, 1,
  3019. PCI_DMA_FROMDEVICE);
  3020. if (rc)
  3021. goto out;
  3022. if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
  3023. rc = -EIO;
  3024. goto out;
  3025. }
  3026. ctrl_info->max_inbound_queues =
  3027. get_unaligned_le16(&capability->max_inbound_queues);
  3028. ctrl_info->max_elements_per_iq =
  3029. get_unaligned_le16(&capability->max_elements_per_iq);
  3030. ctrl_info->max_iq_element_length =
  3031. get_unaligned_le16(&capability->max_iq_element_length)
  3032. * 16;
  3033. ctrl_info->max_outbound_queues =
  3034. get_unaligned_le16(&capability->max_outbound_queues);
  3035. ctrl_info->max_elements_per_oq =
  3036. get_unaligned_le16(&capability->max_elements_per_oq);
  3037. ctrl_info->max_oq_element_length =
  3038. get_unaligned_le16(&capability->max_oq_element_length)
  3039. * 16;
  3040. sop_iu_layer_descriptor =
  3041. &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
  3042. ctrl_info->max_inbound_iu_length_per_firmware =
  3043. get_unaligned_le16(
  3044. &sop_iu_layer_descriptor->max_inbound_iu_length);
  3045. ctrl_info->inbound_spanning_supported =
  3046. sop_iu_layer_descriptor->inbound_spanning_supported;
  3047. ctrl_info->outbound_spanning_supported =
  3048. sop_iu_layer_descriptor->outbound_spanning_supported;
  3049. out:
  3050. kfree(capability);
  3051. return rc;
  3052. }
  3053. static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
  3054. {
  3055. if (ctrl_info->max_iq_element_length <
  3056. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
  3057. dev_err(&ctrl_info->pci_dev->dev,
  3058. "max. inbound queue element length of %d is less than the required length of %d\n",
  3059. ctrl_info->max_iq_element_length,
  3060. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  3061. return -EINVAL;
  3062. }
  3063. if (ctrl_info->max_oq_element_length <
  3064. PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
  3065. dev_err(&ctrl_info->pci_dev->dev,
  3066. "max. outbound queue element length of %d is less than the required length of %d\n",
  3067. ctrl_info->max_oq_element_length,
  3068. PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
  3069. return -EINVAL;
  3070. }
  3071. if (ctrl_info->max_inbound_iu_length_per_firmware <
  3072. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
  3073. dev_err(&ctrl_info->pci_dev->dev,
  3074. "max. inbound IU length of %u is less than the min. required length of %d\n",
  3075. ctrl_info->max_inbound_iu_length_per_firmware,
  3076. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  3077. return -EINVAL;
  3078. }
  3079. if (!ctrl_info->inbound_spanning_supported) {
  3080. dev_err(&ctrl_info->pci_dev->dev,
  3081. "the controller does not support inbound spanning\n");
  3082. return -EINVAL;
  3083. }
  3084. if (ctrl_info->outbound_spanning_supported) {
  3085. dev_err(&ctrl_info->pci_dev->dev,
  3086. "the controller supports outbound spanning but this driver does not\n");
  3087. return -EINVAL;
  3088. }
  3089. return 0;
  3090. }
  3091. static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
  3092. bool inbound_queue, u16 queue_id)
  3093. {
  3094. struct pqi_general_admin_request request;
  3095. struct pqi_general_admin_response response;
  3096. memset(&request, 0, sizeof(request));
  3097. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3098. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3099. &request.header.iu_length);
  3100. if (inbound_queue)
  3101. request.function_code =
  3102. PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
  3103. else
  3104. request.function_code =
  3105. PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
  3106. put_unaligned_le16(queue_id,
  3107. &request.data.delete_operational_queue.queue_id);
  3108. return pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3109. &response);
  3110. }
  3111. static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
  3112. {
  3113. int rc;
  3114. struct pqi_event_queue *event_queue;
  3115. struct pqi_general_admin_request request;
  3116. struct pqi_general_admin_response response;
  3117. event_queue = &ctrl_info->event_queue;
  3118. /*
3119. * Create OQ (Outbound Queue - device to host queue) dedicated
  3120. * to events.
  3121. */
  3122. memset(&request, 0, sizeof(request));
  3123. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3124. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3125. &request.header.iu_length);
  3126. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
  3127. put_unaligned_le16(event_queue->oq_id,
  3128. &request.data.create_operational_oq.queue_id);
  3129. put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
  3130. &request.data.create_operational_oq.element_array_addr);
  3131. put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
  3132. &request.data.create_operational_oq.pi_addr);
  3133. put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
  3134. &request.data.create_operational_oq.num_elements);
  3135. put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
  3136. &request.data.create_operational_oq.element_length);
  3137. request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
  3138. put_unaligned_le16(event_queue->int_msg_num,
  3139. &request.data.create_operational_oq.int_msg_num);
  3140. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3141. &response);
  3142. if (rc)
  3143. return rc;
  3144. event_queue->oq_ci = ctrl_info->iomem_base +
  3145. PQI_DEVICE_REGISTERS_OFFSET +
  3146. get_unaligned_le64(
  3147. &response.data.create_operational_oq.oq_ci_offset);
  3148. return 0;
  3149. }
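/*
 * Each queue group is created with four admin commands: create the
 * RAID-path IQ, create the AIO-path IQ, change the second IQ's property to
 * mark it as an AIO queue, and create the shared OQ.  On failure, the
 * inbound queues that were already created are deleted before returning.
 */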
  3150. static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
  3151. {
  3152. unsigned int i;
  3153. int rc;
  3154. struct pqi_queue_group *queue_group;
  3155. struct pqi_general_admin_request request;
  3156. struct pqi_general_admin_response response;
  3157. i = ctrl_info->num_active_queue_groups;
  3158. queue_group = &ctrl_info->queue_groups[i];
  3159. /*
  3160. * Create IQ (Inbound Queue - host to device queue) for
  3161. * RAID path.
  3162. */
  3163. memset(&request, 0, sizeof(request));
  3164. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3165. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3166. &request.header.iu_length);
  3167. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
  3168. put_unaligned_le16(queue_group->iq_id[RAID_PATH],
  3169. &request.data.create_operational_iq.queue_id);
  3170. put_unaligned_le64(
  3171. (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
  3172. &request.data.create_operational_iq.element_array_addr);
  3173. put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
  3174. &request.data.create_operational_iq.ci_addr);
  3175. put_unaligned_le16(ctrl_info->num_elements_per_iq,
  3176. &request.data.create_operational_iq.num_elements);
  3177. put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
  3178. &request.data.create_operational_iq.element_length);
  3179. request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
  3180. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3181. &response);
  3182. if (rc) {
  3183. dev_err(&ctrl_info->pci_dev->dev,
  3184. "error creating inbound RAID queue\n");
  3185. return rc;
  3186. }
  3187. queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
  3188. PQI_DEVICE_REGISTERS_OFFSET +
  3189. get_unaligned_le64(
  3190. &response.data.create_operational_iq.iq_pi_offset);
  3191. /*
  3192. * Create IQ (Inbound Queue - host to device queue) for
  3193. * Advanced I/O (AIO) path.
  3194. */
  3195. memset(&request, 0, sizeof(request));
  3196. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3197. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3198. &request.header.iu_length);
  3199. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
  3200. put_unaligned_le16(queue_group->iq_id[AIO_PATH],
  3201. &request.data.create_operational_iq.queue_id);
  3202. put_unaligned_le64((u64)queue_group->
  3203. iq_element_array_bus_addr[AIO_PATH],
  3204. &request.data.create_operational_iq.element_array_addr);
  3205. put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
  3206. &request.data.create_operational_iq.ci_addr);
  3207. put_unaligned_le16(ctrl_info->num_elements_per_iq,
  3208. &request.data.create_operational_iq.num_elements);
  3209. put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
  3210. &request.data.create_operational_iq.element_length);
  3211. request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
  3212. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3213. &response);
  3214. if (rc) {
  3215. dev_err(&ctrl_info->pci_dev->dev,
  3216. "error creating inbound AIO queue\n");
  3217. goto delete_inbound_queue_raid;
  3218. }
  3219. queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
  3220. PQI_DEVICE_REGISTERS_OFFSET +
  3221. get_unaligned_le64(
  3222. &response.data.create_operational_iq.iq_pi_offset);
  3223. /*
  3224. * Designate the 2nd IQ as the AIO path. By default, all IQs are
  3225. * assumed to be for RAID path I/O unless we change the queue's
  3226. * property.
  3227. */
  3228. memset(&request, 0, sizeof(request));
  3229. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3230. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3231. &request.header.iu_length);
  3232. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
  3233. put_unaligned_le16(queue_group->iq_id[AIO_PATH],
  3234. &request.data.change_operational_iq_properties.queue_id);
  3235. put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
  3236. &request.data.change_operational_iq_properties.vendor_specific);
  3237. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3238. &response);
  3239. if (rc) {
  3240. dev_err(&ctrl_info->pci_dev->dev,
  3241. "error changing queue property\n");
  3242. goto delete_inbound_queue_aio;
  3243. }
  3244. /*
  3245. * Create OQ (Outbound Queue - device to host queue).
  3246. */
  3247. memset(&request, 0, sizeof(request));
  3248. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3249. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3250. &request.header.iu_length);
  3251. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
  3252. put_unaligned_le16(queue_group->oq_id,
  3253. &request.data.create_operational_oq.queue_id);
  3254. put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
  3255. &request.data.create_operational_oq.element_array_addr);
  3256. put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
  3257. &request.data.create_operational_oq.pi_addr);
  3258. put_unaligned_le16(ctrl_info->num_elements_per_oq,
  3259. &request.data.create_operational_oq.num_elements);
  3260. put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
  3261. &request.data.create_operational_oq.element_length);
  3262. request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
  3263. put_unaligned_le16(queue_group->int_msg_num,
  3264. &request.data.create_operational_oq.int_msg_num);
  3265. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3266. &response);
  3267. if (rc) {
  3268. dev_err(&ctrl_info->pci_dev->dev,
  3269. "error creating outbound queue\n");
  3270. goto delete_inbound_queue_aio;
  3271. }
  3272. queue_group->oq_ci = ctrl_info->iomem_base +
  3273. PQI_DEVICE_REGISTERS_OFFSET +
  3274. get_unaligned_le64(
  3275. &response.data.create_operational_oq.oq_ci_offset);
  3276. ctrl_info->num_active_queue_groups++;
  3277. return 0;
  3278. delete_inbound_queue_aio:
  3279. pqi_delete_operational_queue(ctrl_info, true,
  3280. queue_group->iq_id[AIO_PATH]);
  3281. delete_inbound_queue_raid:
  3282. pqi_delete_operational_queue(ctrl_info, true,
  3283. queue_group->iq_id[RAID_PATH]);
  3284. return rc;
  3285. }
  3286. static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
  3287. {
  3288. int rc;
  3289. unsigned int i;
  3290. rc = pqi_create_event_queue(ctrl_info);
  3291. if (rc) {
  3292. dev_err(&ctrl_info->pci_dev->dev,
  3293. "error creating event queue\n");
  3294. return rc;
  3295. }
  3296. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  3297. rc = pqi_create_queue_group(ctrl_info);
  3298. if (rc) {
  3299. dev_err(&ctrl_info->pci_dev->dev,
  3300. "error creating queue group number %u/%u\n",
  3301. i, ctrl_info->num_queue_groups);
  3302. return rc;
  3303. }
  3304. }
  3305. return 0;
  3306. }
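/*
 * Event configuration is a read-modify-write sequence: the current vendor
 * event configuration is fetched, every descriptor is pointed at the
 * dedicated event OQ, and the updated configuration is written back, all
 * via synchronous RAID-path management requests.
 */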
  3307. #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
  3308. (offsetof(struct pqi_event_config, descriptors) + \
  3309. (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
  3310. static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
  3311. {
  3312. int rc;
  3313. unsigned int i;
  3314. struct pqi_event_config *event_config;
  3315. struct pqi_general_management_request request;
  3316. event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3317. GFP_KERNEL);
  3318. if (!event_config)
  3319. return -ENOMEM;
  3320. memset(&request, 0, sizeof(request));
  3321. request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
  3322. put_unaligned_le16(offsetof(struct pqi_general_management_request,
  3323. data.report_event_configuration.sg_descriptors[1]) -
  3324. PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
  3325. put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3326. &request.data.report_event_configuration.buffer_length);
  3327. rc = pqi_map_single(ctrl_info->pci_dev,
  3328. request.data.report_event_configuration.sg_descriptors,
  3329. event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3330. PCI_DMA_FROMDEVICE);
  3331. if (rc)
  3332. goto out;
  3333. rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
  3334. 0, NULL, NO_TIMEOUT);
  3335. pqi_pci_unmap(ctrl_info->pci_dev,
  3336. request.data.report_event_configuration.sg_descriptors, 1,
  3337. PCI_DMA_FROMDEVICE);
  3338. if (rc)
  3339. goto out;
  3340. for (i = 0; i < event_config->num_event_descriptors; i++)
  3341. put_unaligned_le16(ctrl_info->event_queue.oq_id,
  3342. &event_config->descriptors[i].oq_id);
  3343. memset(&request, 0, sizeof(request));
  3344. request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
  3345. put_unaligned_le16(offsetof(struct pqi_general_management_request,
  3346. data.report_event_configuration.sg_descriptors[1]) -
  3347. PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
  3348. put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3349. &request.data.report_event_configuration.buffer_length);
  3350. rc = pqi_map_single(ctrl_info->pci_dev,
  3351. request.data.report_event_configuration.sg_descriptors,
  3352. event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3353. PCI_DMA_TODEVICE);
  3354. if (rc)
  3355. goto out;
  3356. rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
  3357. NULL, NO_TIMEOUT);
  3358. pqi_pci_unmap(ctrl_info->pci_dev,
  3359. request.data.report_event_configuration.sg_descriptors, 1,
  3360. PCI_DMA_TODEVICE);
  3361. out:
  3362. kfree(event_config);
  3363. return rc;
  3364. }
  3365. static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
  3366. {
  3367. unsigned int i;
  3368. struct device *dev;
  3369. size_t sg_chain_buffer_length;
  3370. struct pqi_io_request *io_request;
  3371. if (!ctrl_info->io_request_pool)
  3372. return;
  3373. dev = &ctrl_info->pci_dev->dev;
  3374. sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
  3375. io_request = ctrl_info->io_request_pool;
  3376. for (i = 0; i < ctrl_info->max_io_slots; i++) {
  3377. kfree(io_request->iu);
  3378. if (!io_request->sg_chain_buffer)
  3379. break;
  3380. dma_free_coherent(dev, sg_chain_buffer_length,
  3381. io_request->sg_chain_buffer,
  3382. io_request->sg_chain_buffer_dma_handle);
  3383. io_request++;
  3384. }
  3385. kfree(ctrl_info->io_request_pool);
  3386. ctrl_info->io_request_pool = NULL;
  3387. }
  3388. static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
  3389. {
  3390. ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
  3391. ctrl_info->error_buffer_length,
  3392. &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
  3393. if (!ctrl_info->error_buffer)
  3394. return -ENOMEM;
  3395. return 0;
  3396. }
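/*
 * The I/O request pool holds max_io_slots entries; each entry carries a
 * kmalloc'ed IU buffer sized for the largest inbound IU plus a coherent
 * scatter-gather chain buffer for commands whose SG list does not fit in
 * the IU's embedded descriptors.
 */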
  3397. static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
  3398. {
  3399. unsigned int i;
  3400. void *sg_chain_buffer;
  3401. size_t sg_chain_buffer_length;
  3402. dma_addr_t sg_chain_buffer_dma_handle;
  3403. struct device *dev;
  3404. struct pqi_io_request *io_request;
3405. ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
3406. sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
  3407. if (!ctrl_info->io_request_pool) {
  3408. dev_err(&ctrl_info->pci_dev->dev,
  3409. "failed to allocate I/O request pool\n");
  3410. goto error;
  3411. }
  3412. dev = &ctrl_info->pci_dev->dev;
  3413. sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
  3414. io_request = ctrl_info->io_request_pool;
  3415. for (i = 0; i < ctrl_info->max_io_slots; i++) {
  3416. io_request->iu =
  3417. kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
  3418. if (!io_request->iu) {
  3419. dev_err(&ctrl_info->pci_dev->dev,
  3420. "failed to allocate IU buffers\n");
  3421. goto error;
  3422. }
  3423. sg_chain_buffer = dma_alloc_coherent(dev,
  3424. sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
  3425. GFP_KERNEL);
  3426. if (!sg_chain_buffer) {
  3427. dev_err(&ctrl_info->pci_dev->dev,
  3428. "failed to allocate PQI scatter-gather chain buffers\n");
  3429. goto error;
  3430. }
  3431. io_request->index = i;
  3432. io_request->sg_chain_buffer = sg_chain_buffer;
  3433. io_request->sg_chain_buffer_dma_handle =
  3434. sg_chain_buffer_dma_handle;
  3435. io_request++;
  3436. }
  3437. return 0;
  3438. error:
  3439. pqi_free_all_io_requests(ctrl_info);
  3440. return -ENOMEM;
  3441. }
  3442. /*
  3443. * Calculate required resources that are sized based on max. outstanding
  3444. * requests and max. transfer size.
  3445. */
  3446. static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
  3447. {
  3448. u32 max_transfer_size;
  3449. u32 max_sg_entries;
  3450. ctrl_info->scsi_ml_can_queue =
  3451. ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
  3452. ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
  3453. ctrl_info->error_buffer_length =
  3454. ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
  3455. max_transfer_size =
  3456. min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
  3457. max_sg_entries = max_transfer_size / PAGE_SIZE;
  3458. /* +1 to cover when the buffer is not page-aligned. */
  3459. max_sg_entries++;
  3460. max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
  3461. max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
  3462. ctrl_info->sg_chain_buffer_length =
  3463. max_sg_entries * sizeof(struct pqi_sg_descriptor);
  3464. ctrl_info->sg_tablesize = max_sg_entries;
  3465. ctrl_info->max_sectors = max_transfer_size / 512;
  3466. }
  3467. static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
  3468. {
  3469. int num_cpus;
  3470. int max_queue_groups;
  3471. int num_queue_groups;
  3472. u16 num_elements_per_iq;
  3473. u16 num_elements_per_oq;
  3474. max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
  3475. ctrl_info->max_outbound_queues - 1);
  3476. max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
  3477. num_cpus = num_online_cpus();
  3478. num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
  3479. num_queue_groups = min(num_queue_groups, max_queue_groups);
  3480. ctrl_info->num_queue_groups = num_queue_groups;
  3481. /*
  3482. * Make sure that the max. inbound IU length is an even multiple
  3483. * of our inbound element length.
  3484. */
  3485. ctrl_info->max_inbound_iu_length =
  3486. (ctrl_info->max_inbound_iu_length_per_firmware /
  3487. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
  3488. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
  3489. num_elements_per_iq =
  3490. (ctrl_info->max_inbound_iu_length /
  3491. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  3492. /* Add one because one element in each queue is unusable. */
  3493. num_elements_per_iq++;
  3494. num_elements_per_iq = min(num_elements_per_iq,
  3495. ctrl_info->max_elements_per_iq);
  3496. num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
  3497. num_elements_per_oq = min(num_elements_per_oq,
  3498. ctrl_info->max_elements_per_oq);
  3499. ctrl_info->num_elements_per_iq = num_elements_per_iq;
  3500. ctrl_info->num_elements_per_oq = num_elements_per_oq;
  3501. ctrl_info->max_sg_per_iu =
  3502. ((ctrl_info->max_inbound_iu_length -
  3503. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
  3504. sizeof(struct pqi_sg_descriptor)) +
  3505. PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
  3506. }
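/*
 * SG list construction (RAID and AIO variants below): up to
 * max_sg_per_iu - 1 data descriptors are embedded in the IU; when more are
 * needed, the next embedded slot becomes a CISS_SG_CHAIN descriptor
 * pointing at the request's chain buffer, and the final descriptor is
 * flagged CISS_SG_LAST.  Illustrative example: with max_sg_per_iu of 4 and
 * a 6-entry mapping, three data descriptors plus one chain descriptor go in
 * the IU and the remaining three data descriptors land in the chain buffer.
 */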
  3507. static inline void pqi_set_sg_descriptor(
  3508. struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
  3509. {
  3510. u64 address = (u64)sg_dma_address(sg);
  3511. unsigned int length = sg_dma_len(sg);
  3512. put_unaligned_le64(address, &sg_descriptor->address);
  3513. put_unaligned_le32(length, &sg_descriptor->length);
  3514. put_unaligned_le32(0, &sg_descriptor->flags);
  3515. }
  3516. static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
  3517. struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
  3518. struct pqi_io_request *io_request)
  3519. {
  3520. int i;
  3521. u16 iu_length;
  3522. int sg_count;
  3523. bool chained;
  3524. unsigned int num_sg_in_iu;
  3525. unsigned int max_sg_per_iu;
  3526. struct scatterlist *sg;
  3527. struct pqi_sg_descriptor *sg_descriptor;
  3528. sg_count = scsi_dma_map(scmd);
  3529. if (sg_count < 0)
  3530. return sg_count;
  3531. iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
  3532. PQI_REQUEST_HEADER_LENGTH;
  3533. if (sg_count == 0)
  3534. goto out;
  3535. sg = scsi_sglist(scmd);
  3536. sg_descriptor = request->sg_descriptors;
  3537. max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
  3538. chained = false;
  3539. num_sg_in_iu = 0;
  3540. i = 0;
  3541. while (1) {
  3542. pqi_set_sg_descriptor(sg_descriptor, sg);
  3543. if (!chained)
  3544. num_sg_in_iu++;
  3545. i++;
  3546. if (i == sg_count)
  3547. break;
  3548. sg_descriptor++;
  3549. if (i == max_sg_per_iu) {
  3550. put_unaligned_le64(
  3551. (u64)io_request->sg_chain_buffer_dma_handle,
  3552. &sg_descriptor->address);
  3553. put_unaligned_le32((sg_count - num_sg_in_iu)
  3554. * sizeof(*sg_descriptor),
  3555. &sg_descriptor->length);
  3556. put_unaligned_le32(CISS_SG_CHAIN,
  3557. &sg_descriptor->flags);
  3558. chained = true;
  3559. num_sg_in_iu++;
  3560. sg_descriptor = io_request->sg_chain_buffer;
  3561. }
  3562. sg = sg_next(sg);
  3563. }
  3564. put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
  3565. request->partial = chained;
  3566. iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
  3567. out:
  3568. put_unaligned_le16(iu_length, &request->header.iu_length);
  3569. return 0;
  3570. }
  3571. static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
  3572. struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
  3573. struct pqi_io_request *io_request)
  3574. {
  3575. int i;
  3576. u16 iu_length;
  3577. int sg_count;
  3578. bool chained;
  3579. unsigned int num_sg_in_iu;
  3580. unsigned int max_sg_per_iu;
  3581. struct scatterlist *sg;
  3582. struct pqi_sg_descriptor *sg_descriptor;
  3583. sg_count = scsi_dma_map(scmd);
  3584. if (sg_count < 0)
  3585. return sg_count;
  3586. iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
  3587. PQI_REQUEST_HEADER_LENGTH;
  3588. num_sg_in_iu = 0;
  3589. if (sg_count == 0)
  3590. goto out;
  3591. sg = scsi_sglist(scmd);
  3592. sg_descriptor = request->sg_descriptors;
  3593. max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
  3594. chained = false;
  3595. i = 0;
  3596. while (1) {
  3597. pqi_set_sg_descriptor(sg_descriptor, sg);
  3598. if (!chained)
  3599. num_sg_in_iu++;
  3600. i++;
  3601. if (i == sg_count)
  3602. break;
  3603. sg_descriptor++;
  3604. if (i == max_sg_per_iu) {
  3605. put_unaligned_le64(
  3606. (u64)io_request->sg_chain_buffer_dma_handle,
  3607. &sg_descriptor->address);
  3608. put_unaligned_le32((sg_count - num_sg_in_iu)
  3609. * sizeof(*sg_descriptor),
  3610. &sg_descriptor->length);
  3611. put_unaligned_le32(CISS_SG_CHAIN,
  3612. &sg_descriptor->flags);
  3613. chained = true;
  3614. num_sg_in_iu++;
  3615. sg_descriptor = io_request->sg_chain_buffer;
  3616. }
  3617. sg = sg_next(sg);
  3618. }
  3619. put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
  3620. request->partial = chained;
  3621. iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
  3622. out:
  3623. put_unaligned_le16(iu_length, &request->header.iu_length);
  3624. request->num_sg_descriptors = num_sg_in_iu;
  3625. return 0;
  3626. }
  3627. static void pqi_raid_io_complete(struct pqi_io_request *io_request,
  3628. void *context)
  3629. {
  3630. struct scsi_cmnd *scmd;
  3631. scmd = io_request->scmd;
  3632. pqi_free_io_request(io_request);
  3633. scsi_dma_unmap(scmd);
  3634. pqi_scsi_done(scmd);
  3635. }
  3636. static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
  3637. struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
  3638. struct pqi_queue_group *queue_group)
  3639. {
  3640. int rc;
  3641. size_t cdb_length;
  3642. struct pqi_io_request *io_request;
  3643. struct pqi_raid_path_request *request;
  3644. io_request = pqi_alloc_io_request(ctrl_info);
  3645. io_request->io_complete_callback = pqi_raid_io_complete;
  3646. io_request->scmd = scmd;
  3647. scmd->host_scribble = (unsigned char *)io_request;
  3648. request = io_request->iu;
  3649. memset(request, 0,
  3650. offsetof(struct pqi_raid_path_request, sg_descriptors));
  3651. request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
  3652. put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
  3653. request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
  3654. put_unaligned_le16(io_request->index, &request->request_id);
  3655. request->error_index = request->request_id;
  3656. memcpy(request->lun_number, device->scsi3addr,
  3657. sizeof(request->lun_number));
  3658. cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
  3659. memcpy(request->cdb, scmd->cmnd, cdb_length);
  3660. switch (cdb_length) {
  3661. case 6:
  3662. case 10:
  3663. case 12:
  3664. case 16:
  3665. /* No bytes in the Additional CDB bytes field */
  3666. request->additional_cdb_bytes_usage =
  3667. SOP_ADDITIONAL_CDB_BYTES_0;
  3668. break;
  3669. case 20:
3670. /* 4 bytes in the Additional CDB bytes field */
  3671. request->additional_cdb_bytes_usage =
  3672. SOP_ADDITIONAL_CDB_BYTES_4;
  3673. break;
  3674. case 24:
3675. /* 8 bytes in the Additional CDB bytes field */
  3676. request->additional_cdb_bytes_usage =
  3677. SOP_ADDITIONAL_CDB_BYTES_8;
  3678. break;
  3679. case 28:
3680. /* 12 bytes in the Additional CDB bytes field */
  3681. request->additional_cdb_bytes_usage =
  3682. SOP_ADDITIONAL_CDB_BYTES_12;
  3683. break;
  3684. case 32:
  3685. default:
3686. /* 16 bytes in the Additional CDB bytes field */
  3687. request->additional_cdb_bytes_usage =
  3688. SOP_ADDITIONAL_CDB_BYTES_16;
  3689. break;
  3690. }
  3691. switch (scmd->sc_data_direction) {
  3692. case DMA_TO_DEVICE:
  3693. request->data_direction = SOP_READ_FLAG;
  3694. break;
  3695. case DMA_FROM_DEVICE:
  3696. request->data_direction = SOP_WRITE_FLAG;
  3697. break;
  3698. case DMA_NONE:
  3699. request->data_direction = SOP_NO_DIRECTION_FLAG;
  3700. break;
  3701. case DMA_BIDIRECTIONAL:
  3702. request->data_direction = SOP_BIDIRECTIONAL;
  3703. break;
  3704. default:
  3705. dev_err(&ctrl_info->pci_dev->dev,
  3706. "unknown data direction: %d\n",
  3707. scmd->sc_data_direction);
  3708. WARN_ON(scmd->sc_data_direction);
  3709. break;
  3710. }
  3711. rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
  3712. if (rc) {
  3713. pqi_free_io_request(io_request);
  3714. return SCSI_MLQUEUE_HOST_BUSY;
  3715. }
  3716. pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
  3717. return 0;
  3718. }
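/*
 * AIO (Advanced I/O) path submission: the command bypasses the RAID stack
 * and is addressed by nexus_id (the device's aio_handle).  A completion
 * status of -EAGAIN is reported as DID_IMM_RETRY so the SCSI midlayer
 * resubmits the command, and optional encryption parameters are copied
 * into the IU when provided.
 */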
  3719. static void pqi_aio_io_complete(struct pqi_io_request *io_request,
  3720. void *context)
  3721. {
  3722. struct scsi_cmnd *scmd;
  3723. scmd = io_request->scmd;
  3724. scsi_dma_unmap(scmd);
  3725. if (io_request->status == -EAGAIN)
  3726. set_host_byte(scmd, DID_IMM_RETRY);
  3727. pqi_free_io_request(io_request);
  3728. pqi_scsi_done(scmd);
  3729. }
  3730. static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
  3731. struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
  3732. struct pqi_queue_group *queue_group)
  3733. {
  3734. return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
  3735. scmd->cmnd, scmd->cmd_len, queue_group, NULL);
  3736. }
  3737. static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
  3738. struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
  3739. unsigned int cdb_length, struct pqi_queue_group *queue_group,
  3740. struct pqi_encryption_info *encryption_info)
  3741. {
  3742. int rc;
  3743. struct pqi_io_request *io_request;
  3744. struct pqi_aio_path_request *request;
  3745. io_request = pqi_alloc_io_request(ctrl_info);
  3746. io_request->io_complete_callback = pqi_aio_io_complete;
  3747. io_request->scmd = scmd;
  3748. scmd->host_scribble = (unsigned char *)io_request;
  3749. request = io_request->iu;
  3750. memset(request, 0,
3751. offsetof(struct pqi_aio_path_request, sg_descriptors));
  3752. request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
  3753. put_unaligned_le32(aio_handle, &request->nexus_id);
  3754. put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
  3755. request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
  3756. put_unaligned_le16(io_request->index, &request->request_id);
  3757. request->error_index = request->request_id;
  3758. if (cdb_length > sizeof(request->cdb))
  3759. cdb_length = sizeof(request->cdb);
  3760. request->cdb_length = cdb_length;
  3761. memcpy(request->cdb, cdb, cdb_length);
  3762. switch (scmd->sc_data_direction) {
  3763. case DMA_TO_DEVICE:
  3764. request->data_direction = SOP_READ_FLAG;
  3765. break;
  3766. case DMA_FROM_DEVICE:
  3767. request->data_direction = SOP_WRITE_FLAG;
  3768. break;
  3769. case DMA_NONE:
  3770. request->data_direction = SOP_NO_DIRECTION_FLAG;
  3771. break;
  3772. case DMA_BIDIRECTIONAL:
  3773. request->data_direction = SOP_BIDIRECTIONAL;
  3774. break;
  3775. default:
  3776. dev_err(&ctrl_info->pci_dev->dev,
  3777. "unknown data direction: %d\n",
  3778. scmd->sc_data_direction);
  3779. WARN_ON(scmd->sc_data_direction);
  3780. break;
  3781. }
  3782. if (encryption_info) {
  3783. request->encryption_enable = true;
  3784. put_unaligned_le16(encryption_info->data_encryption_key_index,
  3785. &request->data_encryption_key_index);
  3786. put_unaligned_le32(encryption_info->encrypt_tweak_lower,
  3787. &request->encrypt_tweak_lower);
  3788. put_unaligned_le32(encryption_info->encrypt_tweak_upper,
  3789. &request->encrypt_tweak_upper);
  3790. }
  3791. rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
  3792. if (rc) {
  3793. pqi_free_io_request(io_request);
  3794. return SCSI_MLQUEUE_HOST_BUSY;
  3795. }
  3796. pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
  3797. return 0;
  3798. }
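/*
 * Main .queuecommand entry point.  Commands are failed immediately if
 * the controller is offline; otherwise a queue group is chosen from the
 * blk-mq hardware queue, the RAID bypass (AIO) path is attempted for
 * filesystem I/O to offload-enabled logical volumes, and everything
 * else is sent down the RAID path.
 */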
  3799. static int pqi_scsi_queue_command(struct Scsi_Host *shost,
  3800. struct scsi_cmnd *scmd)
  3801. {
  3802. int rc;
  3803. struct pqi_ctrl_info *ctrl_info;
  3804. struct pqi_scsi_dev *device;
  3805. u16 hwq;
  3806. struct pqi_queue_group *queue_group;
  3807. bool raid_bypassed;
  3808. device = scmd->device->hostdata;
  3809. ctrl_info = shost_to_hba(shost);
  3810. if (pqi_ctrl_offline(ctrl_info)) {
  3811. set_host_byte(scmd, DID_NO_CONNECT);
  3812. pqi_scsi_done(scmd);
  3813. return 0;
  3814. }
  3815. /*
  3816. * This is necessary because the SML doesn't zero out this field during
  3817. * error recovery.
  3818. */
  3819. scmd->result = 0;
  3820. hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
  3821. if (hwq >= ctrl_info->num_queue_groups)
  3822. hwq = 0;
  3823. queue_group = &ctrl_info->queue_groups[hwq];
  3824. if (pqi_is_logical_device(device)) {
  3825. raid_bypassed = false;
  3826. if (device->offload_enabled &&
  3827. scmd->request->cmd_type == REQ_TYPE_FS) {
  3828. rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
  3829. scmd, queue_group);
  3830. if (rc == 0 ||
  3831. rc == SCSI_MLQUEUE_HOST_BUSY ||
  3832. rc == SAM_STAT_CHECK_CONDITION ||
  3833. rc == SAM_STAT_RESERVATION_CONFLICT)
  3834. raid_bypassed = true;
  3835. }
  3836. if (!raid_bypassed)
  3837. rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
  3838. queue_group);
  3839. } else {
  3840. if (device->aio_enabled)
  3841. rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
  3842. queue_group);
  3843. else
  3844. rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
  3845. queue_group);
  3846. }
  3847. return rc;
  3848. }
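/*
 * LUN reset support for the SCSI error handler: pqi_lun_reset() issues
 * a SOP LUN reset task management request and waits for its completion,
 * checking controller health on every timeout interval.
 */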
  3849. static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
  3850. void *context)
  3851. {
  3852. struct completion *waiting = context;
  3853. complete(waiting);
  3854. }
  3855. #define PQI_LUN_RESET_TIMEOUT_SECS 10
  3856. static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
  3857. struct pqi_scsi_dev *device, struct completion *wait)
  3858. {
  3859. int rc;
  3860. unsigned int wait_secs = 0;
  3861. while (1) {
  3862. if (wait_for_completion_io_timeout(wait,
  3863. PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
  3864. rc = 0;
  3865. break;
  3866. }
  3867. pqi_check_ctrl_health(ctrl_info);
  3868. if (pqi_ctrl_offline(ctrl_info)) {
  3869. rc = -ETIMEDOUT;
  3870. break;
  3871. }
  3872. wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
  3873. dev_err(&ctrl_info->pci_dev->dev,
  3874. "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
  3875. ctrl_info->scsi_host->host_no, device->bus,
  3876. device->target, device->lun, wait_secs);
  3877. }
  3878. return rc;
  3879. }
  3880. static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
  3881. struct pqi_scsi_dev *device)
  3882. {
  3883. int rc;
  3884. struct pqi_io_request *io_request;
  3885. DECLARE_COMPLETION_ONSTACK(wait);
  3886. struct pqi_task_management_request *request;
  3887. down(&ctrl_info->lun_reset_sem);
  3888. io_request = pqi_alloc_io_request(ctrl_info);
  3889. io_request->io_complete_callback = pqi_lun_reset_complete;
  3890. io_request->context = &wait;
  3891. request = io_request->iu;
  3892. memset(request, 0, sizeof(*request));
  3893. request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
  3894. put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
  3895. &request->header.iu_length);
  3896. put_unaligned_le16(io_request->index, &request->request_id);
  3897. memcpy(request->lun_number, device->scsi3addr,
  3898. sizeof(request->lun_number));
  3899. request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
  3900. pqi_start_io(ctrl_info,
  3901. &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
  3902. io_request);
  3903. rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
  3904. if (rc == 0)
  3905. rc = io_request->status;
  3906. pqi_free_io_request(io_request);
  3907. up(&ctrl_info->lun_reset_sem);
  3908. return rc;
  3909. }
  3910. /* Performs a reset at the LUN level. */
  3911. static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
  3912. struct pqi_scsi_dev *device)
  3913. {
  3914. int rc;
  3915. pqi_check_ctrl_health(ctrl_info);
  3916. if (pqi_ctrl_offline(ctrl_info))
  3917. return FAILED;
  3918. rc = pqi_lun_reset(ctrl_info, device);
  3919. return rc == 0 ? SUCCESS : FAILED;
  3920. }
  3921. static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
  3922. {
  3923. int rc;
  3924. struct pqi_ctrl_info *ctrl_info;
  3925. struct pqi_scsi_dev *device;
  3926. ctrl_info = shost_to_hba(scmd->device->host);
  3927. device = scmd->device->hostdata;
  3928. dev_err(&ctrl_info->pci_dev->dev,
  3929. "resetting scsi %d:%d:%d:%d\n",
  3930. ctrl_info->scsi_host->host_no,
  3931. device->bus, device->target, device->lun);
  3932. rc = pqi_device_reset(ctrl_info, device);
  3933. dev_err(&ctrl_info->pci_dev->dev,
  3934. "reset of scsi %d:%d:%d:%d: %s\n",
  3935. ctrl_info->scsi_host->host_no,
  3936. device->bus, device->target, device->lun,
  3937. rc == SUCCESS ? "SUCCESS" : "FAILED");
  3938. return rc;
  3939. }
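/*
 * slave_alloc hook: match the new scsi_device to the driver's
 * pqi_scsi_dev (via the SAS rphy for physical devices, via
 * bus/target/lun for logical devices), record the target/lun mapping
 * and apply the device's advertised queue depth.
 */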
  3940. static int pqi_slave_alloc(struct scsi_device *sdev)
  3941. {
  3942. struct pqi_scsi_dev *device;
  3943. unsigned long flags;
  3944. struct pqi_ctrl_info *ctrl_info;
  3945. struct scsi_target *starget;
  3946. struct sas_rphy *rphy;
  3947. ctrl_info = shost_to_hba(sdev->host);
  3948. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  3949. if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
  3950. starget = scsi_target(sdev);
  3951. rphy = target_to_rphy(starget);
  3952. device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
  3953. if (device) {
  3954. device->target = sdev_id(sdev);
  3955. device->lun = sdev->lun;
  3956. device->target_lun_valid = true;
  3957. }
  3958. } else {
  3959. device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
  3960. sdev_id(sdev), sdev->lun);
  3961. }
  3962. if (device && device->expose_device) {
  3963. sdev->hostdata = device;
  3964. device->sdev = sdev;
  3965. if (device->queue_depth) {
  3966. device->advertised_queue_depth = device->queue_depth;
  3967. scsi_change_queue_depth(sdev,
  3968. device->advertised_queue_depth);
  3969. }
  3970. }
  3971. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  3972. return 0;
  3973. }
  3974. static int pqi_slave_configure(struct scsi_device *sdev)
  3975. {
  3976. struct pqi_scsi_dev *device;
  3977. device = sdev->hostdata;
  3978. if (!device->expose_device)
  3979. sdev->no_uld_attach = true;
  3980. return 0;
  3981. }
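/*
 * CCISS-compatible ioctl support.  CCISS_GETPCIINFO reports the
 * controller's PCI domain/bus/devfn and board ID to user space.
 */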
  3982. static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
  3983. void __user *arg)
  3984. {
  3985. struct pci_dev *pci_dev;
  3986. u32 subsystem_vendor;
  3987. u32 subsystem_device;
  3988. cciss_pci_info_struct pciinfo;
  3989. if (!arg)
  3990. return -EINVAL;
  3991. pci_dev = ctrl_info->pci_dev;
  3992. pciinfo.domain = pci_domain_nr(pci_dev->bus);
  3993. pciinfo.bus = pci_dev->bus->number;
  3994. pciinfo.dev_fn = pci_dev->devfn;
  3995. subsystem_vendor = pci_dev->subsystem_vendor;
  3996. subsystem_device = pci_dev->subsystem_device;
  3997. pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
  3998. subsystem_vendor;
  3999. if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
  4000. return -EFAULT;
  4001. return 0;
  4002. }
  4003. static int pqi_getdrivver_ioctl(void __user *arg)
  4004. {
  4005. u32 version;
  4006. if (!arg)
  4007. return -EINVAL;
  4008. version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
  4009. (DRIVER_RELEASE << 16) | DRIVER_REVISION;
  4010. if (copy_to_user(arg, &version, sizeof(version)))
  4011. return -EFAULT;
  4012. return 0;
  4013. }
  4014. struct ciss_error_info {
  4015. u8 scsi_status;
  4016. int command_status;
  4017. size_t sense_data_length;
  4018. };
  4019. static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
  4020. struct ciss_error_info *ciss_error_info)
  4021. {
  4022. int ciss_cmd_status;
  4023. size_t sense_data_length;
  4024. switch (pqi_error_info->data_out_result) {
  4025. case PQI_DATA_IN_OUT_GOOD:
  4026. ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
  4027. break;
  4028. case PQI_DATA_IN_OUT_UNDERFLOW:
  4029. ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
  4030. break;
  4031. case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
  4032. ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
  4033. break;
  4034. case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
  4035. case PQI_DATA_IN_OUT_BUFFER_ERROR:
  4036. case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
  4037. case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
  4038. case PQI_DATA_IN_OUT_ERROR:
  4039. ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
  4040. break;
  4041. case PQI_DATA_IN_OUT_HARDWARE_ERROR:
  4042. case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
  4043. case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
  4044. case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
  4045. case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
  4046. case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
  4047. case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
  4048. case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
  4049. case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
  4050. case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
  4051. ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
  4052. break;
  4053. case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
  4054. ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
  4055. break;
  4056. case PQI_DATA_IN_OUT_ABORTED:
  4057. ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
  4058. break;
  4059. case PQI_DATA_IN_OUT_TIMEOUT:
  4060. ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
  4061. break;
  4062. default:
  4063. ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
  4064. break;
  4065. }
  4066. sense_data_length =
  4067. get_unaligned_le16(&pqi_error_info->sense_data_length);
  4068. if (sense_data_length == 0)
  4069. sense_data_length =
  4070. get_unaligned_le16(&pqi_error_info->response_data_length);
  4071. if (sense_data_length)
  4072. if (sense_data_length > sizeof(pqi_error_info->data))
  4073. sense_data_length = sizeof(pqi_error_info->data);
  4074. ciss_error_info->scsi_status = pqi_error_info->status;
  4075. ciss_error_info->command_status = ciss_cmd_status;
  4076. ciss_error_info->sense_data_length = sense_data_length;
  4077. }
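/*
 * CCISS_PASSTHRU: validate the user request, bounce-buffer any data,
 * issue the CDB synchronously over the RAID path, then translate the
 * PQI error information back into the CISS format expected by user space.
 */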
  4078. static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
  4079. {
  4080. int rc;
  4081. char *kernel_buffer = NULL;
  4082. u16 iu_length;
  4083. size_t sense_data_length;
  4084. IOCTL_Command_struct iocommand;
  4085. struct pqi_raid_path_request request;
  4086. struct pqi_raid_error_info pqi_error_info;
  4087. struct ciss_error_info ciss_error_info;
  4088. if (pqi_ctrl_offline(ctrl_info))
  4089. return -ENXIO;
  4090. if (!arg)
  4091. return -EINVAL;
  4092. if (!capable(CAP_SYS_RAWIO))
  4093. return -EPERM;
  4094. if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
  4095. return -EFAULT;
  4096. if (iocommand.buf_size < 1 &&
  4097. iocommand.Request.Type.Direction != XFER_NONE)
  4098. return -EINVAL;
  4099. if (iocommand.Request.CDBLen > sizeof(request.cdb))
  4100. return -EINVAL;
  4101. if (iocommand.Request.Type.Type != TYPE_CMD)
  4102. return -EINVAL;
  4103. switch (iocommand.Request.Type.Direction) {
  4104. case XFER_NONE:
  4105. case XFER_WRITE:
  4106. case XFER_READ:
  4107. break;
  4108. default:
  4109. return -EINVAL;
  4110. }
  4111. if (iocommand.buf_size > 0) {
  4112. kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
  4113. if (!kernel_buffer)
  4114. return -ENOMEM;
  4115. if (iocommand.Request.Type.Direction & XFER_WRITE) {
  4116. if (copy_from_user(kernel_buffer, iocommand.buf,
  4117. iocommand.buf_size)) {
  4118. rc = -EFAULT;
  4119. goto out;
  4120. }
  4121. } else {
  4122. memset(kernel_buffer, 0, iocommand.buf_size);
  4123. }
  4124. }
  4125. memset(&request, 0, sizeof(request));
  4126. request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
  4127. iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
  4128. PQI_REQUEST_HEADER_LENGTH;
  4129. memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
  4130. sizeof(request.lun_number));
  4131. memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
  4132. request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
  4133. switch (iocommand.Request.Type.Direction) {
  4134. case XFER_NONE:
  4135. request.data_direction = SOP_NO_DIRECTION_FLAG;
  4136. break;
  4137. case XFER_WRITE:
  4138. request.data_direction = SOP_WRITE_FLAG;
  4139. break;
  4140. case XFER_READ:
  4141. request.data_direction = SOP_READ_FLAG;
  4142. break;
  4143. }
  4144. request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
  4145. if (iocommand.buf_size > 0) {
  4146. put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
  4147. rc = pqi_map_single(ctrl_info->pci_dev,
  4148. &request.sg_descriptors[0], kernel_buffer,
  4149. iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
  4150. if (rc)
  4151. goto out;
  4152. iu_length += sizeof(request.sg_descriptors[0]);
  4153. }
  4154. put_unaligned_le16(iu_length, &request.header.iu_length);
  4155. rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
  4156. PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
  4157. if (iocommand.buf_size > 0)
  4158. pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
  4159. PCI_DMA_BIDIRECTIONAL);
  4160. memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
  4161. if (rc == 0) {
  4162. pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
  4163. iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
  4164. iocommand.error_info.CommandStatus =
  4165. ciss_error_info.command_status;
  4166. sense_data_length = ciss_error_info.sense_data_length;
  4167. if (sense_data_length) {
  4168. if (sense_data_length >
  4169. sizeof(iocommand.error_info.SenseInfo))
  4170. sense_data_length =
  4171. sizeof(iocommand.error_info.SenseInfo);
  4172. memcpy(iocommand.error_info.SenseInfo,
  4173. pqi_error_info.data, sense_data_length);
  4174. iocommand.error_info.SenseLen = sense_data_length;
  4175. }
  4176. }
  4177. if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
  4178. rc = -EFAULT;
  4179. goto out;
  4180. }
  4181. if (rc == 0 && iocommand.buf_size > 0 &&
  4182. (iocommand.Request.Type.Direction & XFER_READ)) {
  4183. if (copy_to_user(iocommand.buf, kernel_buffer,
  4184. iocommand.buf_size)) {
  4185. rc = -EFAULT;
  4186. }
  4187. }
  4188. out:
  4189. kfree(kernel_buffer);
  4190. return rc;
  4191. }
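/* Dispatch the legacy CCISS ioctls onto the helpers above. */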
  4192. static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
  4193. {
  4194. int rc;
  4195. struct pqi_ctrl_info *ctrl_info;
  4196. ctrl_info = shost_to_hba(sdev->host);
  4197. switch (cmd) {
  4198. case CCISS_DEREGDISK:
  4199. case CCISS_REGNEWDISK:
  4200. case CCISS_REGNEWD:
  4201. rc = pqi_scan_scsi_devices(ctrl_info);
  4202. break;
  4203. case CCISS_GETPCIINFO:
  4204. rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
  4205. break;
  4206. case CCISS_GETDRIVVER:
  4207. rc = pqi_getdrivver_ioctl(arg);
  4208. break;
  4209. case CCISS_PASSTHRU:
  4210. rc = pqi_passthru_ioctl(ctrl_info, arg);
  4211. break;
  4212. default:
  4213. rc = -EINVAL;
  4214. break;
  4215. }
  4216. return rc;
  4217. }
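/*
 * Host-level sysfs attributes: a read-only driver/firmware version and
 * a write-only rescan trigger (typically exposed under
 * /sys/class/scsi_host/host<N>/), followed by per-device attributes.
 */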
  4218. static ssize_t pqi_version_show(struct device *dev,
  4219. struct device_attribute *attr, char *buffer)
  4220. {
  4221. ssize_t count = 0;
  4222. struct Scsi_Host *shost;
  4223. struct pqi_ctrl_info *ctrl_info;
  4224. shost = class_to_shost(dev);
  4225. ctrl_info = shost_to_hba(shost);
  4226. count += snprintf(buffer + count, PAGE_SIZE - count,
  4227. " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
  4228. count += snprintf(buffer + count, PAGE_SIZE - count,
  4229. "firmware: %s\n", ctrl_info->firmware_version);
  4230. return count;
  4231. }
  4232. static ssize_t pqi_host_rescan_store(struct device *dev,
  4233. struct device_attribute *attr, const char *buffer, size_t count)
  4234. {
  4235. struct Scsi_Host *shost = class_to_shost(dev);
  4236. pqi_scan_start(shost);
  4237. return count;
  4238. }
  4239. static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
  4240. static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
  4241. static struct device_attribute *pqi_shost_attrs[] = {
  4242. &dev_attr_version,
  4243. &dev_attr_rescan,
  4244. NULL
  4245. };
  4246. static ssize_t pqi_sas_address_show(struct device *dev,
  4247. struct device_attribute *attr, char *buffer)
  4248. {
  4249. struct pqi_ctrl_info *ctrl_info;
  4250. struct scsi_device *sdev;
  4251. struct pqi_scsi_dev *device;
  4252. unsigned long flags;
  4253. u64 sas_address;
  4254. sdev = to_scsi_device(dev);
  4255. ctrl_info = shost_to_hba(sdev->host);
  4256. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  4257. device = sdev->hostdata;
  4258. if (pqi_is_logical_device(device)) {
  4259. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
  4260. flags);
  4261. return -ENODEV;
  4262. }
  4263. sas_address = device->sas_address;
  4264. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  4265. return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
  4266. }
  4267. static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
  4268. struct device_attribute *attr, char *buffer)
  4269. {
  4270. struct pqi_ctrl_info *ctrl_info;
  4271. struct scsi_device *sdev;
  4272. struct pqi_scsi_dev *device;
  4273. unsigned long flags;
  4274. sdev = to_scsi_device(dev);
  4275. ctrl_info = shost_to_hba(sdev->host);
  4276. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  4277. device = sdev->hostdata;
  4278. buffer[0] = device->offload_enabled ? '1' : '0';
  4279. buffer[1] = '\n';
  4280. buffer[2] = '\0';
  4281. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  4282. return 2;
  4283. }
  4284. static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
  4285. static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
  4286. pqi_ssd_smart_path_enabled_show, NULL);
  4287. static struct device_attribute *pqi_sdev_attrs[] = {
  4288. &dev_attr_sas_address,
  4289. &dev_attr_ssd_smart_path_enabled,
  4290. NULL
  4291. };
  4292. static struct scsi_host_template pqi_driver_template = {
  4293. .module = THIS_MODULE,
  4294. .name = DRIVER_NAME_SHORT,
  4295. .proc_name = DRIVER_NAME_SHORT,
  4296. .queuecommand = pqi_scsi_queue_command,
  4297. .scan_start = pqi_scan_start,
  4298. .scan_finished = pqi_scan_finished,
  4299. .this_id = -1,
  4300. .use_clustering = ENABLE_CLUSTERING,
  4301. .eh_device_reset_handler = pqi_eh_device_reset_handler,
  4302. .ioctl = pqi_ioctl,
  4303. .slave_alloc = pqi_slave_alloc,
  4304. .slave_configure = pqi_slave_configure,
  4305. .sdev_attrs = pqi_sdev_attrs,
  4306. .shost_attrs = pqi_shost_attrs,
  4307. };
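/*
 * Allocate a Scsi_Host, size its limits from the controller's
 * capabilities, register it with the midlayer and attach the SAS
 * transport; on failure everything is torn down in reverse order.
 */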
  4308. static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
  4309. {
  4310. int rc;
  4311. struct Scsi_Host *shost;
  4312. shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
  4313. if (!shost) {
  4314. dev_err(&ctrl_info->pci_dev->dev,
  4315. "scsi_host_alloc failed for controller %u\n",
  4316. ctrl_info->ctrl_id);
  4317. return -ENOMEM;
  4318. }
  4319. shost->io_port = 0;
  4320. shost->n_io_port = 0;
  4321. shost->this_id = -1;
  4322. shost->max_channel = PQI_MAX_BUS;
  4323. shost->max_cmd_len = MAX_COMMAND_SIZE;
  4324. shost->max_lun = ~0;
  4325. shost->max_id = ~0;
  4326. shost->max_sectors = ctrl_info->max_sectors;
  4327. shost->can_queue = ctrl_info->scsi_ml_can_queue;
  4328. shost->cmd_per_lun = shost->can_queue;
  4329. shost->sg_tablesize = ctrl_info->sg_tablesize;
  4330. shost->transportt = pqi_sas_transport_template;
  4331. shost->irq = ctrl_info->msix_vectors[0];
  4332. shost->unique_id = shost->irq;
  4333. shost->nr_hw_queues = ctrl_info->num_queue_groups;
  4334. shost->hostdata[0] = (unsigned long)ctrl_info;
  4335. rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
  4336. if (rc) {
  4337. dev_err(&ctrl_info->pci_dev->dev,
  4338. "scsi_add_host failed for controller %u\n",
  4339. ctrl_info->ctrl_id);
  4340. goto free_host;
  4341. }
  4342. rc = pqi_add_sas_host(shost, ctrl_info);
  4343. if (rc) {
  4344. dev_err(&ctrl_info->pci_dev->dev,
  4345. "add SAS host failed for controller %u\n",
  4346. ctrl_info->ctrl_id);
  4347. goto remove_host;
  4348. }
  4349. ctrl_info->scsi_host = shost;
  4350. return 0;
  4351. remove_host:
  4352. scsi_remove_host(shost);
  4353. free_host:
  4354. scsi_host_put(shost);
  4355. return rc;
  4356. }
  4357. static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
  4358. {
  4359. struct Scsi_Host *shost;
  4360. pqi_delete_sas_host(ctrl_info);
  4361. shost = ctrl_info->scsi_host;
  4362. if (!shost)
  4363. return;
  4364. scsi_remove_host(shost);
  4365. scsi_host_put(shost);
  4366. }
  4367. #define PQI_RESET_ACTION_RESET 0x1
  4368. #define PQI_RESET_TYPE_NO_RESET 0x0
  4369. #define PQI_RESET_TYPE_SOFT_RESET 0x1
  4370. #define PQI_RESET_TYPE_FIRM_RESET 0x2
  4371. #define PQI_RESET_TYPE_HARD_RESET 0x3
  4372. static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
  4373. {
  4374. int rc;
  4375. u32 reset_params;
  4376. reset_params = (PQI_RESET_ACTION_RESET << 5) |
  4377. PQI_RESET_TYPE_HARD_RESET;
  4378. writel(reset_params,
  4379. &ctrl_info->pqi_registers->device_reset);
  4380. rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
  4381. if (rc)
  4382. dev_err(&ctrl_info->pci_dev->dev,
  4383. "PQI reset failed\n");
  4384. return rc;
  4385. }
  4386. static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
  4387. {
  4388. int rc;
  4389. struct bmic_identify_controller *identify;
  4390. identify = kmalloc(sizeof(*identify), GFP_KERNEL);
  4391. if (!identify)
  4392. return -ENOMEM;
  4393. rc = pqi_identify_controller(ctrl_info, identify);
  4394. if (rc)
  4395. goto out;
  4396. memcpy(ctrl_info->firmware_version, identify->firmware_version,
  4397. sizeof(identify->firmware_version));
  4398. ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
  4399. snprintf(ctrl_info->firmware_version +
  4400. strlen(ctrl_info->firmware_version),
4401. sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
  4402. "-%u", get_unaligned_le16(&identify->firmware_build_number));
  4403. out:
  4404. kfree(identify);
  4405. return rc;
  4406. }
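/*
 * kdump support: if the crashed kernel left the controller in PQI mode,
 * disable MSI-X and reset it back to SIS mode before reinitializing.
 */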
  4407. static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
  4408. {
  4409. if (!sis_is_firmware_running(ctrl_info))
  4410. return -ENXIO;
  4411. if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
  4412. sis_disable_msix(ctrl_info);
  4413. if (pqi_reset(ctrl_info) == 0)
  4414. sis_reenable_sis_mode(ctrl_info);
  4415. }
  4416. return 0;
  4417. }
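/*
 * Bring the controller from SIS mode to a fully operational PQI state:
 * create the admin and operational queues, set up interrupts and event
 * notification, start the heartbeat timer, register with the SCSI
 * midlayer and kick off the initial device scan.
 */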
  4418. static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
  4419. {
  4420. int rc;
  4421. if (reset_devices) {
  4422. rc = pqi_kdump_init(ctrl_info);
  4423. if (rc)
  4424. return rc;
  4425. }
  4426. /*
  4427. * When the controller comes out of reset, it is always running
  4428. * in legacy SIS mode. This is so that it can be compatible
  4429. * with legacy drivers shipped with OSes. So we have to talk
4430. to it using SIS commands at first. Once we are satisfied
  4431. * that the controller supports PQI, we transition it into PQI
  4432. * mode.
  4433. */
  4434. /*
  4435. * Wait until the controller is ready to start accepting SIS
  4436. * commands.
  4437. */
  4438. rc = sis_wait_for_ctrl_ready(ctrl_info);
  4439. if (rc) {
  4440. dev_err(&ctrl_info->pci_dev->dev,
  4441. "error initializing SIS interface\n");
  4442. return rc;
  4443. }
  4444. /*
  4445. * Get the controller properties. This allows us to determine
  4446. * whether or not it supports PQI mode.
  4447. */
  4448. rc = sis_get_ctrl_properties(ctrl_info);
  4449. if (rc) {
  4450. dev_err(&ctrl_info->pci_dev->dev,
  4451. "error obtaining controller properties\n");
  4452. return rc;
  4453. }
  4454. rc = sis_get_pqi_capabilities(ctrl_info);
  4455. if (rc) {
  4456. dev_err(&ctrl_info->pci_dev->dev,
  4457. "error obtaining controller capabilities\n");
  4458. return rc;
  4459. }
  4460. if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
  4461. ctrl_info->max_outstanding_requests =
  4462. PQI_MAX_OUTSTANDING_REQUESTS;
  4463. pqi_calculate_io_resources(ctrl_info);
  4464. rc = pqi_alloc_error_buffer(ctrl_info);
  4465. if (rc) {
  4466. dev_err(&ctrl_info->pci_dev->dev,
  4467. "failed to allocate PQI error buffer\n");
  4468. return rc;
  4469. }
  4470. /*
  4471. * If the function we are about to call succeeds, the
  4472. * controller will transition from legacy SIS mode
  4473. * into PQI mode.
  4474. */
  4475. rc = sis_init_base_struct_addr(ctrl_info);
  4476. if (rc) {
  4477. dev_err(&ctrl_info->pci_dev->dev,
  4478. "error initializing PQI mode\n");
  4479. return rc;
  4480. }
  4481. /* Wait for the controller to complete the SIS -> PQI transition. */
  4482. rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
  4483. if (rc) {
  4484. dev_err(&ctrl_info->pci_dev->dev,
  4485. "transition to PQI mode failed\n");
  4486. return rc;
  4487. }
  4488. /* From here on, we are running in PQI mode. */
  4489. ctrl_info->pqi_mode_enabled = true;
  4490. pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
  4491. rc = pqi_alloc_admin_queues(ctrl_info);
  4492. if (rc) {
  4493. dev_err(&ctrl_info->pci_dev->dev,
  4494. "error allocating admin queues\n");
  4495. return rc;
  4496. }
  4497. rc = pqi_create_admin_queues(ctrl_info);
  4498. if (rc) {
  4499. dev_err(&ctrl_info->pci_dev->dev,
  4500. "error creating admin queues\n");
  4501. return rc;
  4502. }
  4503. rc = pqi_report_device_capability(ctrl_info);
  4504. if (rc) {
  4505. dev_err(&ctrl_info->pci_dev->dev,
  4506. "obtaining device capability failed\n");
  4507. return rc;
  4508. }
  4509. rc = pqi_validate_device_capability(ctrl_info);
  4510. if (rc)
  4511. return rc;
  4512. pqi_calculate_queue_resources(ctrl_info);
  4513. rc = pqi_enable_msix_interrupts(ctrl_info);
  4514. if (rc)
  4515. return rc;
  4516. if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
  4517. ctrl_info->max_msix_vectors =
  4518. ctrl_info->num_msix_vectors_enabled;
  4519. pqi_calculate_queue_resources(ctrl_info);
  4520. }
  4521. rc = pqi_alloc_io_resources(ctrl_info);
  4522. if (rc)
  4523. return rc;
  4524. rc = pqi_alloc_operational_queues(ctrl_info);
  4525. if (rc)
  4526. return rc;
  4527. pqi_init_operational_queues(ctrl_info);
  4528. rc = pqi_request_irqs(ctrl_info);
  4529. if (rc)
  4530. return rc;
  4531. pqi_irq_set_affinity_hint(ctrl_info);
  4532. rc = pqi_create_queues(ctrl_info);
  4533. if (rc)
  4534. return rc;
  4535. sis_enable_msix(ctrl_info);
  4536. rc = pqi_configure_events(ctrl_info);
  4537. if (rc) {
  4538. dev_err(&ctrl_info->pci_dev->dev,
  4539. "error configuring events\n");
  4540. return rc;
  4541. }
  4542. pqi_start_heartbeat_timer(ctrl_info);
  4543. ctrl_info->controller_online = true;
  4544. /* Register with the SCSI subsystem. */
  4545. rc = pqi_register_scsi(ctrl_info);
  4546. if (rc)
  4547. return rc;
  4548. rc = pqi_get_ctrl_firmware_version(ctrl_info);
  4549. if (rc) {
  4550. dev_err(&ctrl_info->pci_dev->dev,
  4551. "error obtaining firmware version\n");
  4552. return rc;
  4553. }
  4554. rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
  4555. if (rc) {
  4556. dev_err(&ctrl_info->pci_dev->dev,
  4557. "error updating host wellness\n");
  4558. return rc;
  4559. }
  4560. pqi_schedule_update_time_worker(ctrl_info);
  4561. pqi_scan_scsi_devices(ctrl_info);
  4562. return 0;
  4563. }
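/*
 * PCI-level setup: enable the device, pick a 64-bit or 32-bit DMA mask,
 * claim the PCI regions, map the controller registers in BAR 0 and
 * enable bus mastering.
 */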
  4564. static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
  4565. {
  4566. int rc;
  4567. u64 mask;
  4568. rc = pci_enable_device(ctrl_info->pci_dev);
  4569. if (rc) {
  4570. dev_err(&ctrl_info->pci_dev->dev,
  4571. "failed to enable PCI device\n");
  4572. return rc;
  4573. }
  4574. if (sizeof(dma_addr_t) > 4)
  4575. mask = DMA_BIT_MASK(64);
  4576. else
  4577. mask = DMA_BIT_MASK(32);
  4578. rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
  4579. if (rc) {
  4580. dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
  4581. goto disable_device;
  4582. }
  4583. rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
  4584. if (rc) {
  4585. dev_err(&ctrl_info->pci_dev->dev,
  4586. "failed to obtain PCI resources\n");
  4587. goto disable_device;
  4588. }
  4589. ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
  4590. ctrl_info->pci_dev, 0),
  4591. sizeof(struct pqi_ctrl_registers));
  4592. if (!ctrl_info->iomem_base) {
  4593. dev_err(&ctrl_info->pci_dev->dev,
  4594. "failed to map memory for controller registers\n");
  4595. rc = -ENOMEM;
  4596. goto release_regions;
  4597. }
  4598. ctrl_info->registers = ctrl_info->iomem_base;
  4599. ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
  4600. /* Enable bus mastering. */
  4601. pci_set_master(ctrl_info->pci_dev);
  4602. pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
  4603. return 0;
  4604. release_regions:
  4605. pci_release_regions(ctrl_info->pci_dev);
  4606. disable_device:
  4607. pci_disable_device(ctrl_info->pci_dev);
  4608. return rc;
  4609. }
  4610. static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
  4611. {
  4612. iounmap(ctrl_info->iomem_base);
  4613. pci_release_regions(ctrl_info->pci_dev);
  4614. pci_disable_device(ctrl_info->pci_dev);
  4615. pci_set_drvdata(ctrl_info->pci_dev, NULL);
  4616. }
  4617. static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
  4618. {
  4619. struct pqi_ctrl_info *ctrl_info;
  4620. ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
  4621. GFP_KERNEL, numa_node);
  4622. if (!ctrl_info)
  4623. return NULL;
  4624. mutex_init(&ctrl_info->scan_mutex);
  4625. INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
  4626. spin_lock_init(&ctrl_info->scsi_device_list_lock);
  4627. INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
  4628. atomic_set(&ctrl_info->num_interrupts, 0);
  4629. INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
  4630. INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
  4631. sema_init(&ctrl_info->sync_request_sem,
  4632. PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
  4633. sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
  4634. ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
  4635. ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
  4636. return ctrl_info;
  4637. }
  4638. static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
  4639. {
  4640. kfree(ctrl_info);
  4641. }
  4642. static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
  4643. {
  4644. pqi_irq_unset_affinity_hint(ctrl_info);
  4645. pqi_free_irqs(ctrl_info);
  4646. if (ctrl_info->num_msix_vectors_enabled)
  4647. pci_disable_msix(ctrl_info->pci_dev);
  4648. }
  4649. static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
  4650. {
  4651. pqi_stop_heartbeat_timer(ctrl_info);
  4652. pqi_free_interrupts(ctrl_info);
  4653. if (ctrl_info->queue_memory_base)
  4654. dma_free_coherent(&ctrl_info->pci_dev->dev,
  4655. ctrl_info->queue_memory_length,
  4656. ctrl_info->queue_memory_base,
  4657. ctrl_info->queue_memory_base_dma_handle);
  4658. if (ctrl_info->admin_queue_memory_base)
  4659. dma_free_coherent(&ctrl_info->pci_dev->dev,
  4660. ctrl_info->admin_queue_memory_length,
  4661. ctrl_info->admin_queue_memory_base,
  4662. ctrl_info->admin_queue_memory_base_dma_handle);
  4663. pqi_free_all_io_requests(ctrl_info);
  4664. if (ctrl_info->error_buffer)
  4665. dma_free_coherent(&ctrl_info->pci_dev->dev,
  4666. ctrl_info->error_buffer_length,
  4667. ctrl_info->error_buffer,
  4668. ctrl_info->error_buffer_dma_handle);
  4669. if (ctrl_info->iomem_base)
  4670. pqi_cleanup_pci_init(ctrl_info);
  4671. pqi_free_ctrl_info(ctrl_info);
  4672. }
  4673. static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
  4674. {
  4675. cancel_delayed_work_sync(&ctrl_info->rescan_work);
  4676. cancel_delayed_work_sync(&ctrl_info->update_time_work);
  4677. pqi_remove_all_scsi_devices(ctrl_info);
  4678. pqi_unregister_scsi(ctrl_info);
  4679. if (ctrl_info->pqi_mode_enabled) {
  4680. sis_disable_msix(ctrl_info);
  4681. if (pqi_reset(ctrl_info) == 0)
  4682. sis_reenable_sis_mode(ctrl_info);
  4683. }
  4684. pqi_free_ctrl_resources(ctrl_info);
  4685. }
  4686. static void pqi_print_ctrl_info(struct pci_dev *pdev,
  4687. const struct pci_device_id *id)
  4688. {
  4689. char *ctrl_description;
  4690. if (id->driver_data) {
  4691. ctrl_description = (char *)id->driver_data;
  4692. } else {
  4693. switch (id->subvendor) {
  4694. case PCI_VENDOR_ID_HP:
  4695. ctrl_description = hpe_branded_controller;
  4696. break;
  4697. case PCI_VENDOR_ID_ADAPTEC2:
  4698. default:
  4699. ctrl_description = microsemi_branded_controller;
  4700. break;
  4701. }
  4702. }
  4703. dev_info(&pdev->dev, "%s found\n", ctrl_description);
  4704. }
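/*
 * PCI probe entry point: optionally refuse wildcard device-ID matches,
 * allocate the controller context on the device's NUMA node, then run
 * PCI and controller initialization.
 */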
  4705. static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  4706. {
  4707. int rc;
  4708. int node;
  4709. struct pqi_ctrl_info *ctrl_info;
  4710. pqi_print_ctrl_info(pdev, id);
  4711. if (pqi_disable_device_id_wildcards &&
  4712. id->subvendor == PCI_ANY_ID &&
  4713. id->subdevice == PCI_ANY_ID) {
  4714. dev_warn(&pdev->dev,
  4715. "controller not probed because device ID wildcards are disabled\n");
  4716. return -ENODEV;
  4717. }
  4718. if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
  4719. dev_warn(&pdev->dev,
  4720. "controller device ID matched using wildcards\n");
  4721. node = dev_to_node(&pdev->dev);
  4722. if (node == NUMA_NO_NODE)
  4723. set_dev_node(&pdev->dev, 0);
  4724. ctrl_info = pqi_alloc_ctrl_info(node);
  4725. if (!ctrl_info) {
  4726. dev_err(&pdev->dev,
  4727. "failed to allocate controller info block\n");
  4728. return -ENOMEM;
  4729. }
  4730. ctrl_info->pci_dev = pdev;
  4731. rc = pqi_pci_init(ctrl_info);
  4732. if (rc)
  4733. goto error;
  4734. rc = pqi_ctrl_init(ctrl_info);
  4735. if (rc)
  4736. goto error;
  4737. return 0;
  4738. error:
  4739. pqi_remove_ctrl(ctrl_info);
  4740. return rc;
  4741. }
  4742. static void pqi_pci_remove(struct pci_dev *pdev)
  4743. {
  4744. struct pqi_ctrl_info *ctrl_info;
  4745. ctrl_info = pci_get_drvdata(pdev);
  4746. if (!ctrl_info)
  4747. return;
  4748. pqi_remove_ctrl(ctrl_info);
  4749. }
  4750. static void pqi_shutdown(struct pci_dev *pdev)
  4751. {
  4752. int rc;
  4753. struct pqi_ctrl_info *ctrl_info;
  4754. ctrl_info = pci_get_drvdata(pdev);
  4755. if (!ctrl_info)
  4756. goto error;
  4757. /*
  4758. * Write all data in the controller's battery-backed cache to
  4759. * storage.
  4760. */
  4761. rc = pqi_flush_cache(ctrl_info);
  4762. if (rc == 0)
  4763. return;
  4764. error:
  4765. dev_warn(&pdev->dev,
  4766. "unable to flush controller cache\n");
  4767. }
  4768. /* Define the PCI IDs for the controllers that we support. */
  4769. static const struct pci_device_id pqi_pci_id_table[] = {
  4770. {
  4771. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4772. PCI_VENDOR_ID_ADAPTEC2, 0x0110)
  4773. },
  4774. {
  4775. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4776. PCI_VENDOR_ID_HP, 0x0600)
  4777. },
  4778. {
  4779. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4780. PCI_VENDOR_ID_HP, 0x0601)
  4781. },
  4782. {
  4783. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4784. PCI_VENDOR_ID_HP, 0x0602)
  4785. },
  4786. {
  4787. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4788. PCI_VENDOR_ID_HP, 0x0603)
  4789. },
  4790. {
  4791. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4792. PCI_VENDOR_ID_HP, 0x0650)
  4793. },
  4794. {
  4795. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4796. PCI_VENDOR_ID_HP, 0x0651)
  4797. },
  4798. {
  4799. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4800. PCI_VENDOR_ID_HP, 0x0652)
  4801. },
  4802. {
  4803. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4804. PCI_VENDOR_ID_HP, 0x0653)
  4805. },
  4806. {
  4807. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4808. PCI_VENDOR_ID_HP, 0x0654)
  4809. },
  4810. {
  4811. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4812. PCI_VENDOR_ID_HP, 0x0655)
  4813. },
  4814. {
  4815. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4816. PCI_VENDOR_ID_HP, 0x0700)
  4817. },
  4818. {
  4819. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4820. PCI_VENDOR_ID_HP, 0x0701)
  4821. },
  4822. {
  4823. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4824. PCI_VENDOR_ID_ADAPTEC2, 0x0800)
  4825. },
  4826. {
  4827. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4828. PCI_VENDOR_ID_ADAPTEC2, 0x0801)
  4829. },
  4830. {
  4831. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4832. PCI_VENDOR_ID_ADAPTEC2, 0x0802)
  4833. },
  4834. {
  4835. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4836. PCI_VENDOR_ID_ADAPTEC2, 0x0803)
  4837. },
  4838. {
  4839. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4840. PCI_VENDOR_ID_ADAPTEC2, 0x0804)
  4841. },
  4842. {
  4843. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4844. PCI_VENDOR_ID_ADAPTEC2, 0x0805)
  4845. },
  4846. {
  4847. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4848. PCI_VENDOR_ID_ADAPTEC2, 0x0900)
  4849. },
  4850. {
  4851. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4852. PCI_VENDOR_ID_ADAPTEC2, 0x0901)
  4853. },
  4854. {
  4855. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4856. PCI_VENDOR_ID_ADAPTEC2, 0x0902)
  4857. },
  4858. {
  4859. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4860. PCI_VENDOR_ID_ADAPTEC2, 0x0903)
  4861. },
  4862. {
  4863. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4864. PCI_VENDOR_ID_ADAPTEC2, 0x0904)
  4865. },
  4866. {
  4867. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4868. PCI_VENDOR_ID_ADAPTEC2, 0x0905)
  4869. },
  4870. {
  4871. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4872. PCI_VENDOR_ID_ADAPTEC2, 0x0906)
  4873. },
  4874. {
  4875. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4876. PCI_VENDOR_ID_HP, 0x1001)
  4877. },
  4878. {
  4879. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4880. PCI_VENDOR_ID_HP, 0x1100)
  4881. },
  4882. {
  4883. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4884. PCI_VENDOR_ID_HP, 0x1101)
  4885. },
  4886. {
  4887. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4888. PCI_VENDOR_ID_HP, 0x1102)
  4889. },
  4890. {
  4891. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4892. PCI_VENDOR_ID_HP, 0x1150)
  4893. },
  4894. {
  4895. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4896. PCI_ANY_ID, PCI_ANY_ID)
  4897. },
  4898. { 0 }
  4899. };
  4900. MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
  4901. static struct pci_driver pqi_pci_driver = {
  4902. .name = DRIVER_NAME_SHORT,
  4903. .id_table = pqi_pci_id_table,
  4904. .probe = pqi_pci_probe,
  4905. .remove = pqi_pci_remove,
  4906. .shutdown = pqi_shutdown,
  4907. };
  4908. static int __init pqi_init(void)
  4909. {
  4910. int rc;
  4911. pr_info(DRIVER_NAME "\n");
  4912. pqi_sas_transport_template =
  4913. sas_attach_transport(&pqi_sas_transport_functions);
  4914. if (!pqi_sas_transport_template)
  4915. return -ENODEV;
  4916. rc = pci_register_driver(&pqi_pci_driver);
  4917. if (rc)
  4918. sas_release_transport(pqi_sas_transport_template);
  4919. return rc;
  4920. }
  4921. static void __exit pqi_cleanup(void)
  4922. {
  4923. pci_unregister_driver(&pqi_pci_driver);
  4924. sas_release_transport(pqi_sas_transport_template);
  4925. }
  4926. module_init(pqi_init);
  4927. module_exit(pqi_cleanup);
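/*
 * Compile-time layout checks: these BUILD_BUG_ON()s verify that the
 * structure definitions match the register and IU offsets the rest of
 * the driver assumes.  The function is never called; the unused
 * attribute only silences the compiler warning.
 */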
  4928. static void __attribute__((unused)) verify_structures(void)
  4929. {
  4930. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4931. sis_host_to_ctrl_doorbell) != 0x20);
  4932. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4933. sis_interrupt_mask) != 0x34);
  4934. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4935. sis_ctrl_to_host_doorbell) != 0x9c);
  4936. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4937. sis_ctrl_to_host_doorbell_clear) != 0xa0);
  4938. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4939. sis_driver_scratch) != 0xb0);
  4940. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4941. sis_firmware_status) != 0xbc);
  4942. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4943. sis_mailbox) != 0x1000);
  4944. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4945. pqi_registers) != 0x4000);
  4946. BUILD_BUG_ON(offsetof(struct pqi_iu_header,
  4947. iu_type) != 0x0);
  4948. BUILD_BUG_ON(offsetof(struct pqi_iu_header,
  4949. iu_length) != 0x2);
  4950. BUILD_BUG_ON(offsetof(struct pqi_iu_header,
  4951. response_queue_id) != 0x4);
  4952. BUILD_BUG_ON(offsetof(struct pqi_iu_header,
  4953. work_area) != 0x6);
  4954. BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
  4955. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4956. status) != 0x0);
  4957. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4958. service_response) != 0x1);
  4959. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4960. data_present) != 0x2);
  4961. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4962. reserved) != 0x3);
  4963. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4964. residual_count) != 0x4);
  4965. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4966. data_length) != 0x8);
  4967. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4968. reserved1) != 0xa);
  4969. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4970. data) != 0xc);
  4971. BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
  4972. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4973. data_in_result) != 0x0);
  4974. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4975. data_out_result) != 0x1);
  4976. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4977. reserved) != 0x2);
  4978. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4979. status) != 0x5);
  4980. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4981. status_qualifier) != 0x6);
  4982. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4983. sense_data_length) != 0x8);
  4984. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4985. response_data_length) != 0xa);
  4986. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4987. data_in_transferred) != 0xc);
  4988. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4989. data_out_transferred) != 0x10);
  4990. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4991. data) != 0x14);
  4992. BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
  4993. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4994. signature) != 0x0);
  4995. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4996. function_and_status_code) != 0x8);
  4997. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4998. max_admin_iq_elements) != 0x10);
  4999. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5000. max_admin_oq_elements) != 0x11);
  5001. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5002. admin_iq_element_length) != 0x12);
  5003. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5004. admin_oq_element_length) != 0x13);
  5005. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5006. max_reset_timeout) != 0x14);
  5007. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5008. legacy_intx_status) != 0x18);
  5009. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5010. legacy_intx_mask_set) != 0x1c);
  5011. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5012. legacy_intx_mask_clear) != 0x20);
  5013. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5014. device_status) != 0x40);
  5015. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5016. admin_iq_pi_offset) != 0x48);
  5017. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5018. admin_oq_ci_offset) != 0x50);
  5019. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5020. admin_iq_element_array_addr) != 0x58);
  5021. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5022. admin_oq_element_array_addr) != 0x60);
  5023. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5024. admin_iq_ci_addr) != 0x68);
  5025. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5026. admin_oq_pi_addr) != 0x70);
  5027. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5028. admin_iq_num_elements) != 0x78);
  5029. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5030. admin_oq_num_elements) != 0x79);
  5031. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5032. admin_queue_int_msg_num) != 0x7a);
  5033. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5034. device_error) != 0x80);
  5035. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5036. error_details) != 0x88);
  5037. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5038. device_reset) != 0x90);
  5039. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5040. power_action) != 0x94);
  5041. BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
  5042. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5043. header.iu_type) != 0);
  5044. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5045. header.iu_length) != 2);
  5046. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5047. header.work_area) != 6);
  5048. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5049. request_id) != 8);
  5050. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5051. function_code) != 10);
  5052. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5053. data.report_device_capability.buffer_length) != 44);
  5054. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5055. data.report_device_capability.sg_descriptor) != 48);
  5056. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5057. data.create_operational_iq.queue_id) != 12);
  5058. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5059. data.create_operational_iq.element_array_addr) != 16);
  5060. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5061. data.create_operational_iq.ci_addr) != 24);
  5062. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5063. data.create_operational_iq.num_elements) != 32);
  5064. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5065. data.create_operational_iq.element_length) != 34);
  5066. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5067. data.create_operational_iq.queue_protocol) != 36);
  5068. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5069. data.create_operational_oq.queue_id) != 12);
  5070. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5071. data.create_operational_oq.element_array_addr) != 16);
  5072. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5073. data.create_operational_oq.pi_addr) != 24);
  5074. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5075. data.create_operational_oq.num_elements) != 32);
  5076. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5077. data.create_operational_oq.element_length) != 34);
  5078. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5079. data.create_operational_oq.queue_protocol) != 36);
  5080. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5081. data.create_operational_oq.int_msg_num) != 40);
  5082. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5083. data.create_operational_oq.coalescing_count) != 42);
  5084. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5085. data.create_operational_oq.min_coalescing_time) != 44);
  5086. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5087. data.create_operational_oq.max_coalescing_time) != 48);
  5088. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5089. data.delete_operational_queue.queue_id) != 12);
  5090. BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
  5091. BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
  5092. data.create_operational_iq) != 64 - 11);
  5093. BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
  5094. data.create_operational_oq) != 64 - 11);
  5095. BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
  5096. data.delete_operational_queue) != 64 - 11);
  5097. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5098. header.iu_type) != 0);
  5099. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5100. header.iu_length) != 2);
  5101. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5102. header.work_area) != 6);
  5103. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5104. request_id) != 8);
  5105. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5106. function_code) != 10);
  5107. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5108. status) != 11);
  5109. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5110. data.create_operational_iq.status_descriptor) != 12);
  5111. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5112. data.create_operational_iq.iq_pi_offset) != 16);
  5113. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5114. data.create_operational_oq.status_descriptor) != 12);
  5115. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5116. data.create_operational_oq.oq_ci_offset) != 16);
  5117. BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
  5118. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5119. header.iu_type) != 0);
  5120. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5121. header.iu_length) != 2);
  5122. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5123. header.response_queue_id) != 4);
  5124. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5125. header.work_area) != 6);
  5126. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5127. request_id) != 8);
  5128. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5129. nexus_id) != 10);
  5130. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5131. buffer_length) != 12);
  5132. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5133. lun_number) != 16);
  5134. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5135. protocol_specific) != 24);
  5136. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5137. error_index) != 27);
  5138. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5139. cdb) != 32);
  5140. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5141. sg_descriptors) != 64);
  5142. BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
  5143. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  5144. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5145. header.iu_type) != 0);
  5146. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5147. header.iu_length) != 2);
  5148. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5149. header.response_queue_id) != 4);
  5150. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5151. header.work_area) != 6);
  5152. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5153. request_id) != 8);
  5154. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5155. nexus_id) != 12);
  5156. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5157. buffer_length) != 16);
  5158. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5159. data_encryption_key_index) != 22);
  5160. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5161. encrypt_tweak_lower) != 24);
  5162. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5163. encrypt_tweak_upper) != 28);
  5164. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5165. cdb) != 32);
  5166. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5167. error_index) != 48);
  5168. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5169. num_sg_descriptors) != 50);
  5170. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5171. cdb_length) != 51);
  5172. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5173. lun_number) != 52);
  5174. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5175. sg_descriptors) != 64);
  5176. BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
  5177. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  5178. BUILD_BUG_ON(offsetof(struct pqi_io_response,
  5179. header.iu_type) != 0);
  5180. BUILD_BUG_ON(offsetof(struct pqi_io_response,
  5181. header.iu_length) != 2);
  5182. BUILD_BUG_ON(offsetof(struct pqi_io_response,
  5183. request_id) != 8);
  5184. BUILD_BUG_ON(offsetof(struct pqi_io_response,
  5185. error_index) != 10);
  5186. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5187. header.iu_type) != 0);
  5188. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5189. header.iu_length) != 2);
  5190. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5191. header.response_queue_id) != 4);
  5192. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5193. request_id) != 8);
  5194. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5195. data.report_event_configuration.buffer_length) != 12);
  5196. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5197. data.report_event_configuration.sg_descriptors) != 16);
  5198. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5199. data.set_event_configuration.global_event_oq_id) != 10);
  5200. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5201. data.set_event_configuration.buffer_length) != 12);
  5202. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5203. data.set_event_configuration.sg_descriptors) != 16);
  5204. BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
  5205. max_inbound_iu_length) != 6);
  5206. BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
  5207. max_outbound_iu_length) != 14);
  5208. BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
  5209. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5210. data_length) != 0);
  5211. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5212. iq_arbitration_priority_support_bitmask) != 8);
  5213. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5214. maximum_aw_a) != 9);
  5215. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5216. maximum_aw_b) != 10);
  5217. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5218. maximum_aw_c) != 11);
  5219. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5220. max_inbound_queues) != 16);
  5221. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5222. max_elements_per_iq) != 18);
  5223. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5224. max_iq_element_length) != 24);
  5225. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5226. min_iq_element_length) != 26);
  5227. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5228. max_outbound_queues) != 30);
  5229. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5230. max_elements_per_oq) != 32);
  5231. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5232. intr_coalescing_time_granularity) != 34);
  5233. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5234. max_oq_element_length) != 36);
  5235. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5236. min_oq_element_length) != 38);
  5237. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5238. iu_layer_descriptors) != 64);
  5239. BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
  5240. BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
  5241. event_type) != 0);
  5242. BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
  5243. oq_id) != 2);
  5244. BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
  5245. BUILD_BUG_ON(offsetof(struct pqi_event_config,
  5246. num_event_descriptors) != 2);
  5247. BUILD_BUG_ON(offsetof(struct pqi_event_config,
  5248. descriptors) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

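	/* task management request and response IUs */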
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

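	/* BMIC identify controller buffer */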
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

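	/* admin and operational queue element count/length limits */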
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
}