/* Copyright 2012 STEC, Inc.
 *
 * This file is licensed under the terms of the 3-clause
 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
 * at your option. Both licenses are also available in the LICENSE file
 * distributed with this project. This file may not be copied, modified,
 * or distributed except in accordance with those terms.
 * Gordoni Waidhofer <[email protected]>
 * Initial Driver Design!
 * Thomas Swann <[email protected]>
 * Interrupt handling.
 * Ramprasad Chinthekindi <[email protected]>
 * biomode implementation.
 * Akhil Bhansali <[email protected]>
 * Added support for DISCARD / FLUSH and FUA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include "skd_s1120.h"
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
        STEC_LINK_2_5GTS = 0,
        STEC_LINK_5GTS = 1,
        STEC_LINK_8GTS = 2,
        STEC_LINK_UNKNOWN = 0xFF
};

enum {
        SKD_FLUSH_INITIALIZER,
        SKD_FLUSH_ZERO_SIZE_FIRST,
        SKD_FLUSH_DATA_SECOND,
};

#define SKD_ASSERT(expr) \
        do { \
                if (unlikely(!(expr))) { \
                        pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
                               # expr, __FILE__, __func__, __LINE__); \
                } \
        } while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID

MODULE_AUTHOR("bug-reports: [email protected]");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001

#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u

#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)

#define SKD_N_SPECIAL_CONTEXT 32u
#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL 256u

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)

#define SKD_N_INTERNAL_BYTES (512u)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu

#define SKD_N_TIMEOUT_SLOT 4u
#define SKD_TIMEOUT_SLOT_MASK 3u

#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
enum skd_drvr_state {
        SKD_DRVR_STATE_LOAD,
        SKD_DRVR_STATE_IDLE,
        SKD_DRVR_STATE_BUSY,
        SKD_DRVR_STATE_STARTING,
        SKD_DRVR_STATE_ONLINE,
        SKD_DRVR_STATE_PAUSING,
        SKD_DRVR_STATE_PAUSED,
        SKD_DRVR_STATE_DRAINING_TIMEOUT,
        SKD_DRVR_STATE_RESTARTING,
        SKD_DRVR_STATE_RESUMING,
        SKD_DRVR_STATE_STOPPING,
        SKD_DRVR_STATE_FAULT,
        SKD_DRVR_STATE_DISAPPEARED,
        SKD_DRVR_STATE_PROTOCOL_MISMATCH,
        SKD_DRVR_STATE_BUSY_ERASE,
        SKD_DRVR_STATE_BUSY_SANITIZE,
        SKD_DRVR_STATE_BUSY_IMMINENT,
        SKD_DRVR_STATE_WAIT_BOOT,
        SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u

enum skd_req_state {
        SKD_REQ_STATE_IDLE,
        SKD_REQ_STATE_SETUP,
        SKD_REQ_STATE_BUSY,
        SKD_REQ_STATE_COMPLETED,
        SKD_REQ_STATE_TIMEOUT,
        SKD_REQ_STATE_ABORTED,
};

enum skd_fit_msg_state {
        SKD_MSG_STATE_IDLE,
        SKD_MSG_STATE_BUSY,
};

enum skd_check_status_action {
        SKD_CHECK_STATUS_REPORT_GOOD,
        SKD_CHECK_STATUS_REPORT_SMART_ALERT,
        SKD_CHECK_STATUS_REQUEUE_REQUEST,
        SKD_CHECK_STATUS_REPORT_ERROR,
        SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_fitmsg_context {
        enum skd_fit_msg_state state;
        struct skd_fitmsg_context *next;
        u32 id;
        u16 outstanding;
        u32 length;
        u32 offset;
        u8 *msg_buf;
        dma_addr_t mb_dma_address;
};

struct skd_request_context {
        enum skd_req_state state;
        struct skd_request_context *next;
        u16 id;
        u32 fitmsg_id;
        struct request *req;
        u8 flush_cmd;
        u32 timeout_stamp;
        u8 sg_data_dir;
        struct scatterlist *sg;
        u32 n_sg;
        u32 sg_byte_count;
        struct fit_sg_descriptor *sksg_list;
        dma_addr_t sksg_dma_address;
        struct fit_completion_entry_v1 completion;
        struct fit_comp_error_info err_info;
};

#define SKD_DATA_DIR_HOST_TO_CARD 1
#define SKD_DATA_DIR_CARD_TO_HOST 2

struct skd_special_context {
        struct skd_request_context req;
        u8 orphaned;
        void *data_buf;
        dma_addr_t db_dma_address;
        u8 *msg_buf;
        dma_addr_t mb_dma_address;
};

struct skd_sg_io {
        fmode_t mode;
        void __user *argp;
        struct sg_io_hdr sg;
        u8 cdb[16];
        u32 dxfer_len;
        u32 iovcnt;
        struct sg_iovec *iov;
        struct sg_iovec no_iov_iov;
        struct skd_special_context *skspcl;
};

typedef enum skd_irq_type {
        SKD_IRQ_LEGACY,
        SKD_IRQ_MSI,
        SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2

struct skd_device {
        volatile void __iomem *mem_map[SKD_MAX_BARS];
        resource_size_t mem_phys[SKD_MAX_BARS];
        u32 mem_size[SKD_MAX_BARS];
        skd_irq_type_t irq_type;
        u32 msix_count;
        struct skd_msix_entry *msix_entries;
        struct pci_dev *pdev;
        int pcie_error_reporting_is_enabled;
        spinlock_t lock;
        struct gendisk *disk;
        struct request_queue *queue;
        struct device *class_dev;
        int gendisk_on;
        int sync_done;
        atomic_t device_count;
        u32 devno;
        u32 major;
        char name[32];
        char isr_name[30];
        enum skd_drvr_state state;
        u32 drive_state;
        u32 in_flight;
        u32 cur_max_queue_depth;
        u32 queue_low_water_mark;
        u32 dev_max_queue_depth;
        u32 num_fitmsg_context;
        u32 num_req_context;
        u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
        u32 timeout_stamp;
        struct skd_fitmsg_context *skmsg_free_list;
        struct skd_fitmsg_context *skmsg_table;
        struct skd_request_context *skreq_free_list;
        struct skd_request_context *skreq_table;
        struct skd_special_context *skspcl_free_list;
        struct skd_special_context *skspcl_table;
        struct skd_special_context internal_skspcl;
        u32 read_cap_blocksize;
        u32 read_cap_last_lba;
        int read_cap_is_valid;
        int inquiry_is_valid;
        u8 inq_serial_num[13]; /* 12 chars plus null term */
        u8 id_str[80]; /* holds a composite name (pci + sernum) */
        u8 skcomp_cycle;
        u32 skcomp_ix;
        struct fit_completion_entry_v1 *skcomp_table;
        struct fit_comp_error_info *skerr_table;
        dma_addr_t cq_dma_address;
        wait_queue_head_t waitq;
        struct timer_list timer;
        u32 timer_countdown;
        u32 timer_substate;
        int n_special;
        int sgs_per_request;
        u32 last_mtd;
        u32 proto_ver;
        int dbg_level;
        u32 connect_time_stamp;
        int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
        u32 drive_jiffies;
        u32 timo_slot;
        struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
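
/*
 * Device registers are accessed through BAR 1 (mem_map[1]). These helpers
 * wrap readl/writel/writeq; when dbg_level >= 2 each access is bracketed
 * with barriers and traced via pr_debug.
 */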
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
        u32 val;

        if (likely(skdev->dbg_level < 2))
                return readl(skdev->mem_map[1] + offset);
        else {
                barrier();
                val = readl(skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %x\n",
                         skdev->name, __func__, __LINE__, offset, val);
                return val;
        }
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
                                   u32 offset)
{
        if (likely(skdev->dbg_level < 2)) {
                writel(val, skdev->mem_map[1] + offset);
                barrier();
        } else {
                barrier();
                writel(val, skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %x\n",
                         skdev->name, __func__, __LINE__, offset, val);
        }
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
                                   u32 offset)
{
        if (likely(skdev->dbg_level < 2)) {
                writeq(val, skdev->mem_map[1] + offset);
                barrier();
        } else {
                barrier();
                writeq(val, skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %016llx\n",
                         skdev->name, __func__, __LINE__, offset, val);
        }
}

#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;
module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
                 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
                 "Maximum SCSI requests packed in a single message."
                 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
                 "Maximum SCSI requests issued to s1120."
                 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
                 "Maximum SG elements per block request."
                 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
                 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
                            struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
                                    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
                            struct skd_request_context *skreq, int error);
static int skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq);
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
                                struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);

static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
                          struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
                          struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
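
/*
 * Drain the block queue, completing every pending request with -EIO.
 * Called when the device is not (or is no longer) in a state that can
 * service I/O.
 */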
static void skd_fail_all_pending(struct skd_device *skdev)
{
        struct request_queue *q = skdev->queue;
        struct request *req;

        for (;;) {
                req = blk_peek_request(q);
                if (req == NULL)
                        break;
                blk_start_request(req);
                __blk_end_request_all(req, -EIO);
        }
}
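
/*
 * Build a 10-byte SCSI READ(10)/WRITE(10) CDB for the SoFIT request:
 * opcode 0x28 (read) or 0x2a (write), big-endian LBA in bytes 2-5 and
 * big-endian transfer length (in sectors) in bytes 7-8.
 * Example: a read of 8 sectors at lba 0x12345678 produces
 *   28 00 12 34 56 78 00 00 08 00
 */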
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
                int data_dir, unsigned lba,
                unsigned count)
{
        if (data_dir == READ)
                scsi_req->cdb[0] = 0x28;
        else
                scsi_req->cdb[0] = 0x2a;

        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
        scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
        scsi_req->cdb[4] = (lba & 0xff00) >> 8;
        scsi_req->cdb[5] = (lba & 0xff);
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = (count & 0xff00) >> 8;
        scsi_req->cdb[8] = count & 0xff;
        scsi_req->cdb[9] = 0;
}

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
                            struct skd_request_context *skreq)
{
        skreq->flush_cmd = 1;

        scsi_req->cdb[0] = 0x35;
        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = 0;
        scsi_req->cdb[3] = 0;
        scsi_req->cdb[4] = 0;
        scsi_req->cdb[5] = 0;
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = 0;
        scsi_req->cdb[8] = 0;
        scsi_req->cdb[9] = 0;
}
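
/*
 * Main request-queue strategy routine. For each native request it
 * allocates a skd_request_context and (if needed) a FIT message buffer,
 * encodes the request as a SoFIT SCSI command, and coalesces up to
 * skd_max_req_per_msg commands per FIT message before sending it to the
 * device. The queue is stopped when a resource (request context, FIT
 * message buffer, or queue depth) runs out.
 */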
static void skd_request_fn_not_online(struct request_queue *q);

static void skd_request_fn(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;
        struct skd_fitmsg_context *skmsg = NULL;
        struct fit_msg_hdr *fmh = NULL;
        struct skd_request_context *skreq;
        struct request *req = NULL;
        struct skd_scsi_request *scsi_req;
        unsigned long io_flags;
        int error;
        u32 lba;
        u32 count;
        int data_dir;
        u32 be_lba;
        u32 be_count;
        u64 be_dmaa;
        u64 cmdctxt;
        u32 timo_slot;
        void *cmd_ptr;
        int flush, fua;

        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
                skd_request_fn_not_online(q);
                return;
        }

        if (blk_queue_stopped(skdev->queue)) {
                if (skdev->skmsg_free_list == NULL ||
                    skdev->skreq_free_list == NULL ||
                    skdev->in_flight >= skdev->queue_low_water_mark)
                        /* There is still some kind of shortage */
                        return;

                queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
        }

        /*
         * Stop conditions:
         *  - There are no more native requests
         *  - There are already the maximum number of requests in progress
         *  - There are no more skd_request_context entries
         *  - There are no more FIT msg buffers
         */
        for (;;) {

                flush = fua = 0;

                req = blk_peek_request(q);

                /* Are there any native requests to start? */
                if (req == NULL)
                        break;

                lba = (u32)blk_rq_pos(req);
                count = blk_rq_sectors(req);
                data_dir = rq_data_dir(req);
                io_flags = req->cmd_flags;

                if (req_op(req) == REQ_OP_FLUSH)
                        flush++;

                if (io_flags & REQ_FUA)
                        fua++;

                pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
                         "count=%u(0x%x) dir=%d\n",
                         skdev->name, __func__, __LINE__,
                         req, lba, lba, count, count, data_dir);

                /* At this point we know there is a request */

                /* Are too many requests already in progress? */
                if (skdev->in_flight >= skdev->cur_max_queue_depth) {
                        pr_debug("%s:%s:%d qdepth %d, limit %d\n",
                                 skdev->name, __func__, __LINE__,
                                 skdev->in_flight, skdev->cur_max_queue_depth);
                        break;
                }

                /* Is a skd_request_context available? */
                skreq = skdev->skreq_free_list;
                if (skreq == NULL) {
                        pr_debug("%s:%s:%d Out of req=%p\n",
                                 skdev->name, __func__, __LINE__, q);
                        break;
                }
                SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
                SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

                /* Now we check to see if we can get a fit msg */
                if (skmsg == NULL) {
                        if (skdev->skmsg_free_list == NULL) {
                                pr_debug("%s:%s:%d Out of msg\n",
                                         skdev->name, __func__, __LINE__);
                                break;
                        }
                }

                skreq->flush_cmd = 0;
                skreq->n_sg = 0;
                skreq->sg_byte_count = 0;

                /*
                 * OK to now dequeue request from q.
                 *
                 * At this point we are committed to either start or reject
                 * the native request. Note that skd_request_context is
                 * available but is still at the head of the free list.
                 */
                blk_start_request(req);
                skreq->req = req;
                skreq->fitmsg_id = 0;

                /* Either a FIT msg is in progress or we have to start one. */
                if (skmsg == NULL) {
                        /* Are there any FIT msg buffers available? */
                        skmsg = skdev->skmsg_free_list;
                        if (skmsg == NULL) {
                                pr_debug("%s:%s:%d Out of msg skdev=%p\n",
                                         skdev->name, __func__, __LINE__,
                                         skdev);
                                break;
                        }
                        SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
                        SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

                        skdev->skmsg_free_list = skmsg->next;

                        skmsg->state = SKD_MSG_STATE_BUSY;
                        skmsg->id += SKD_ID_INCR;

                        /* Initialize the FIT msg header */
                        fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
                        memset(fmh, 0, sizeof(*fmh));
                        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
                        skmsg->length = sizeof(*fmh);
                }

                skreq->fitmsg_id = skmsg->id;

                /*
                 * Note that a FIT msg may have just been started
                 * but contains no SoFIT requests yet.
                 */

                /*
                 * Transcode the request, checking as we go. The outcome of
                 * the transcoding is represented by the error variable.
                 */
                cmd_ptr = &skmsg->msg_buf[skmsg->length];
                memset(cmd_ptr, 0, 32);

                be_lba = cpu_to_be32(lba);
                be_count = cpu_to_be32(count);
                be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
                cmdctxt = skreq->id + SKD_ID_INCR;

                scsi_req = cmd_ptr;
                scsi_req->hdr.tag = cmdctxt;
                scsi_req->hdr.sg_list_dma_address = be_dmaa;

                if (data_dir == READ)
                        skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
                else
                        skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

                if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
                        skd_prep_zerosize_flush_cdb(scsi_req, skreq);
                        SKD_ASSERT(skreq->flush_cmd == 1);
                } else {
                        skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
                }

                if (fua)
                        scsi_req->cdb[1] |= SKD_FUA_NV;

                if (!req->bio)
                        goto skip_sg;

                error = skd_preop_sg_list(skdev, skreq);

                if (error != 0) {
                        /*
                         * Complete the native request with error.
                         * Note that the request context is still at the
                         * head of the free list, and that the SoFIT request
                         * was encoded into the FIT msg buffer but the FIT
                         * msg length has not been updated. In short, the
                         * only resource that has been allocated but might
                         * not be used is that the FIT msg could be empty.
                         */
                        pr_debug("%s:%s:%d error Out\n",
                                 skdev->name, __func__, __LINE__);
                        skd_end_request(skdev, skreq, error);
                        continue;
                }

skip_sg:
                scsi_req->hdr.sg_list_len_bytes =
                        cpu_to_be32(skreq->sg_byte_count);

                /* Complete resource allocations. */
                skdev->skreq_free_list = skreq->next;
                skreq->state = SKD_REQ_STATE_BUSY;
                skreq->id += SKD_ID_INCR;

                skmsg->length += sizeof(struct skd_scsi_request);
                fmh->num_protocol_cmds_coalesced++;

                /*
                 * Update the active request counts.
                 * Capture the timeout timestamp.
                 */
                skreq->timeout_stamp = skdev->timeout_stamp;
                timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
                skdev->timeout_slot[timo_slot]++;
                skdev->in_flight++;
                pr_debug("%s:%s:%d req=0x%x busy=%d\n",
                         skdev->name, __func__, __LINE__,
                         skreq->id, skdev->in_flight);

                /*
                 * If the FIT msg buffer is full send it.
                 */
                if (skmsg->length >= SKD_N_FITMSG_BYTES ||
                    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
                        skd_send_fitmsg(skdev, skmsg);
                        skmsg = NULL;
                        fmh = NULL;
                }
        }

        /*
         * Is a FIT msg in progress? If it is empty put the buffer back
         * on the free list. If it is non-empty send what we got.
         * This minimizes latency when there are fewer requests than
         * what fits in a FIT msg.
         */
        if (skmsg != NULL) {
                /* Bigger than just a FIT msg header? */
                if (skmsg->length > sizeof(struct fit_msg_hdr)) {
                        pr_debug("%s:%s:%d sending msg=%p, len %d\n",
                                 skdev->name, __func__, __LINE__,
                                 skmsg, skmsg->length);
                        skd_send_fitmsg(skdev, skmsg);
                } else {
                        /*
                         * The FIT msg is empty. It means we got started
                         * on the msg, but the requests were rejected.
                         */
                        skmsg->state = SKD_MSG_STATE_IDLE;
                        skmsg->id += SKD_ID_INCR;
                        skmsg->next = skdev->skmsg_free_list;
                        skdev->skmsg_free_list = skmsg;
                }
                skmsg = NULL;
                fmh = NULL;
        }

        /*
         * If req is non-NULL it means there is something to do but
         * we are out of a resource.
         */
        if (req)
                blk_stop_queue(skdev->queue);
}
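
/*
 * Complete a native request back to the block layer, logging the command,
 * sector, and count when it failed.
 */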
static void skd_end_request(struct skd_device *skdev,
                            struct skd_request_context *skreq, int error)
{
        if (unlikely(error)) {
                struct request *req = skreq->req;
                char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
                u32 lba = (u32)blk_rq_pos(req);
                u32 count = blk_rq_sectors(req);

                pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
                       skd_name(skdev), cmd, lba, count, skreq->id);
        } else
                pr_debug("%s:%s:%d id=0x%x error=%d\n",
                         skdev->name, __func__, __LINE__, skreq->id, error);

        __blk_end_request_all(skreq->req, error);
}
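
/*
 * Build the DMA scatter-gather list for a request: map the bio segments
 * with blk_rq_map_sg(), DMA-map them with pci_map_sg(), and translate the
 * result into the device's fit_sg_descriptor chain, marking the final
 * descriptor with FIT_SGD_CONTROL_LAST.
 */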
static int skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq)
{
        struct request *req = skreq->req;
        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
        struct scatterlist *sg = &skreq->sg[0];
        int n_sg;
        int i;

        skreq->sg_byte_count = 0;

        /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
                      skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

        n_sg = blk_rq_map_sg(skdev->queue, req, sg);
        if (n_sg <= 0)
                return -EINVAL;

        /*
         * Map scatterlist to PCI bus addresses.
         * Note PCI might change the number of entries.
         */
        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
        if (n_sg <= 0)
                return -EINVAL;

        SKD_ASSERT(n_sg <= skdev->sgs_per_request);

        skreq->n_sg = n_sg;

        for (i = 0; i < n_sg; i++) {
                struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
                u32 cnt = sg_dma_len(&sg[i]);
                uint64_t dma_addr = sg_dma_address(&sg[i]);

                sgd->control = FIT_SGD_CONTROL_NOT_LAST;
                sgd->byte_count = cnt;
                skreq->sg_byte_count += cnt;
                sgd->host_side_addr = dma_addr;
                sgd->dev_side_addr = 0;
        }

        skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
        skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

        if (unlikely(skdev->dbg_level > 1)) {
                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
                         skdev->name, __func__, __LINE__,
                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
                for (i = 0; i < n_sg; i++) {
                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
                        pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
                                 "addr=0x%llx next=0x%llx\n",
                                 skdev->name, __func__, __LINE__,
                                 i, sgd->byte_count, sgd->control,
                                 sgd->host_side_addr, sgd->next_desc_ptr);
                }
        }

        return 0;
}
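
/*
 * Undo skd_preop_sg_list() once the request has completed: re-link the
 * last descriptor's next_desc_ptr into the pre-built chain and unmap the
 * scatterlist from the PCI DMA space.
 */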
static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq)
{
        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

        /*
         * restore the next ptr for next IO request so we
         * don't have to set it every time.
         */
        skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
                skreq->sksg_dma_address +
                ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}

static void skd_request_fn_not_online(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;
        int error;

        SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

        skd_log_skdev(skdev, "req_not_online");
        switch (skdev->state) {
        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
        case SKD_DRVR_STATE_STARTING:
        case SKD_DRVR_STATE_RESTARTING:
        case SKD_DRVR_STATE_WAIT_BOOT:
                /* In case of starting, we haven't started the queue,
                 * so we can't get here... but requests are
                 * possibly hanging out waiting for us because we
                 * reported the dev/skd0 already. They'll wait
                 * forever if connect doesn't complete.
                 * What to do??? delay dev/skd0 ??
                 */
        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                return;

        case SKD_DRVR_STATE_BUSY_SANITIZE:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                error = -EIO;
                break;
        }

        /* If we get here, terminate all pending block requests
         * with EIO and any scsi pass thru with appropriate sense
         */
        skd_fail_all_pending(skdev);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */
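
/*
 * One-second heartbeat timer. It watches for firmware state changes and
 * for overdue I/O: outstanding requests are binned into SKD_N_TIMEOUT_SLOT
 * slots by timestamp, and any request still counted when its slot comes
 * around again is treated as overdue, stopping the queue and moving the
 * driver into the DRAINING_TIMEOUT state.
 */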
static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_timer_tick(ulong arg)
{
        struct skd_device *skdev = (struct skd_device *)arg;

        u32 timo_slot;
        u32 overdue_timestamp;
        unsigned long reqflags;
        u32 state;

        if (skdev->state == SKD_DRVR_STATE_FAULT)
                /* The driver has declared fault, and we want it to
                 * stay that way until driver is reloaded.
                 */
                return;

        spin_lock_irqsave(&skdev->lock, reqflags);

        state = SKD_READL(skdev, FIT_STATUS);
        state &= FIT_SR_DRIVE_STATE_MASK;
        if (state != skdev->drive_state)
                skd_isr_fwstate(skdev);

        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
                skd_timer_tick_not_online(skdev);
                goto timer_func_out;
        }
        skdev->timeout_stamp++;
        timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

        /*
         * All requests that happened during the previous use of
         * this slot should be done by now. The previous use was
         * over 7 seconds ago.
         */
        if (skdev->timeout_slot[timo_slot] == 0)
                goto timer_func_out;

        /* Something is overdue */
        overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;

        pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
                 skdev->name, __func__, __LINE__,
                 skdev->timeout_slot[timo_slot], skdev->in_flight);
        pr_err("(%s): Overdue IOs (%d), busy %d\n",
               skd_name(skdev), skdev->timeout_slot[timo_slot],
               skdev->in_flight);

        skdev->timer_countdown = SKD_DRAINING_TIMO;
        skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
        skdev->timo_slot = timo_slot;
        blk_stop_queue(skdev->queue);

timer_func_out:
        mod_timer(&skdev->timer, (jiffies + HZ));

        spin_unlock_irqrestore(&skdev->lock, reqflags);
}

static void skd_timer_tick_not_online(struct skd_device *skdev)
{
        switch (skdev->state) {
        case SKD_DRVR_STATE_IDLE:
        case SKD_DRVR_STATE_LOAD:
                break;
        case SKD_DRVR_STATE_BUSY_SANITIZE:
                pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
                         skdev->name, __func__, __LINE__,
                         skdev->drive_state, skdev->state);
                /* If we've been in sanitize for 3 seconds, we figure we're not
                 * going to get any more completions, so recover requests now
                 */
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                skd_recover_requests(skdev, 0);
                break;

        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
                pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
                         skdev->name, __func__, __LINE__,
                         skdev->state, skdev->timer_countdown);
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
                         skdev->name, __func__, __LINE__,
                         skdev->state, skdev->timer_countdown);
                skd_restart_device(skdev);
                break;

        case SKD_DRVR_STATE_WAIT_BOOT:
        case SKD_DRVR_STATE_STARTING:
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                /* For now, we fault the drive. Could attempt resets to
                 * recover at some point. */
                skdev->state = SKD_DRVR_STATE_FAULT;

                pr_err("(%s): DriveFault Connect Timeout (%x)\n",
                       skd_name(skdev), skdev->drive_state);

                /* start the queue so we can respond with error to requests */
                /* wakeup anyone waiting for startup complete */
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_ONLINE:
                /* shouldn't get here. */
                break;

        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
                break;

        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                pr_debug("%s:%s:%d "
                         "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
                         skdev->name, __func__, __LINE__,
                         skdev->timo_slot,
                         skdev->timer_countdown,
                         skdev->in_flight,
                         skdev->timeout_slot[skdev->timo_slot]);
                /* if the slot has cleared we can let the I/O continue */
                if (skdev->timeout_slot[skdev->timo_slot] == 0) {
                        pr_debug("%s:%s:%d Slot drained, starting queue.\n",
                                 skdev->name, __func__, __LINE__);
                        skdev->state = SKD_DRVR_STATE_ONLINE;
                        blk_start_queue(skdev->queue);
                        return;
                }
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                skd_restart_device(skdev);
                break;

        case SKD_DRVR_STATE_RESTARTING:
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                /* For now, we fault the drive. Could attempt resets to
                 * recover at some point. */
                skdev->state = SKD_DRVR_STATE_FAULT;
                pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
                       skd_name(skdev), skdev->drive_state);
                /*
                 * Recovering does two things:
                 * 1. completes IO with error
                 * 2. reclaims dma resources
                 * When is it safe to recover requests?
                 * - if the drive state is faulted
                 * - if the state is still soft reset after our timeout
                 * - if the drive registers are dead (state = FF)
                 * If it is "unsafe", we still need to recover, so we will
                 * disable pci bus mastering and disable our interrupts.
                 */
                if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
                    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
                    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
                        /* It never came out of soft reset. Try to
                         * recover the requests and then let them
                         * fail. This is to mitigate hung processes. */
                        skd_recover_requests(skdev, 0);
                else {
                        pr_err("(%s): Disable BusMaster (%x)\n",
                               skd_name(skdev), skdev->drive_state);
                        pci_disable_device(skdev->pdev);
                        skd_disable_interrupts(skdev);
                        skd_recover_requests(skdev, 0);
                }

                /* start the queue so we can respond with error to requests */
                /* wakeup anyone waiting for startup complete */
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_RESUMING:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                break;
        }
}
static int skd_start_timer(struct skd_device *skdev)
{
        int rc;

        init_timer(&skdev->timer);
        setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

        rc = mod_timer(&skdev->timer, (jiffies + HZ));
        if (rc)
                pr_err("%s: failed to start timer %d\n",
                       __func__, rc);
        return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
        del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * IOCTL
 *****************************************************************************
 */
static int skd_ioctl_sg_io(struct skd_device *skdev,
                           fmode_t mode, void __user *argp);
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
                                        struct skd_sg_io *sksgio);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
                                   struct skd_sg_io *sksgio);
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
                                    struct skd_sg_io *sksgio);
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
                                 struct skd_sg_io *sksgio, int dxfer_dir);
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
                                 struct skd_sg_io *sksgio);
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
                                    struct skd_sg_io *sksgio);
static int skd_sg_io_put_status(struct skd_device *skdev,
                                struct skd_sg_io *sksgio);

static void skd_complete_special(struct skd_device *skdev,
                                 volatile struct fit_completion_entry_v1
                                 *skcomp,
                                 volatile struct fit_comp_error_info *skerr,
                                 struct skd_special_context *skspcl);
  1006. static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
  1007. uint cmd_in, ulong arg)
  1008. {
  1009. int rc = 0;
  1010. struct gendisk *disk = bdev->bd_disk;
  1011. struct skd_device *skdev = disk->private_data;
  1012. void __user *p = (void *)arg;
  1013. pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
  1014. skdev->name, __func__, __LINE__,
  1015. disk->disk_name, current->comm, mode, cmd_in, arg);
  1016. if (!capable(CAP_SYS_ADMIN))
  1017. return -EPERM;
  1018. switch (cmd_in) {
  1019. case SG_SET_TIMEOUT:
  1020. case SG_GET_TIMEOUT:
  1021. case SG_GET_VERSION_NUM:
  1022. rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
  1023. break;
  1024. case SG_IO:
  1025. rc = skd_ioctl_sg_io(skdev, mode, p);
  1026. break;
  1027. default:
  1028. rc = -ENOTTY;
  1029. break;
  1030. }
  1031. pr_debug("%s:%s:%d %s: completion rc %d\n",
  1032. skdev->name, __func__, __LINE__, disk->disk_name, rc);
  1033. return rc;
  1034. }
  1035. static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
  1036. void __user *argp)
  1037. {
  1038. int rc;
  1039. struct skd_sg_io sksgio;
  1040. memset(&sksgio, 0, sizeof(sksgio));
  1041. sksgio.mode = mode;
  1042. sksgio.argp = argp;
  1043. sksgio.iov = &sksgio.no_iov_iov;
  1044. switch (skdev->state) {
  1045. case SKD_DRVR_STATE_ONLINE:
  1046. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1047. break;
  1048. default:
  1049. pr_debug("%s:%s:%d drive not online\n",
  1050. skdev->name, __func__, __LINE__);
  1051. rc = -ENXIO;
  1052. goto out;
  1053. }
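/*
 * SG_IO pipeline: validate and copy in the user's sg_io_hdr, grab a
 * special context, set up the bounce pages, copy user data toward the
 * device (write direction), send the FIT message, wait for completion,
 * copy data back to the user (read direction), then post status/sense.
 */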
  1054. rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
  1055. if (rc)
  1056. goto out;
  1057. rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
  1058. if (rc)
  1059. goto out;
  1060. rc = skd_sg_io_prep_buffering(skdev, &sksgio);
  1061. if (rc)
  1062. goto out;
  1063. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
  1064. if (rc)
  1065. goto out;
  1066. rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
  1067. if (rc)
  1068. goto out;
  1069. rc = skd_sg_io_await(skdev, &sksgio);
  1070. if (rc)
  1071. goto out;
  1072. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
  1073. if (rc)
  1074. goto out;
  1075. rc = skd_sg_io_put_status(skdev, &sksgio);
  1076. if (rc)
  1077. goto out;
  1078. rc = 0;
  1079. out:
  1080. skd_sg_io_release_skspcl(skdev, &sksgio);
  1081. if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
  1082. kfree(sksgio.iov);
  1083. return rc;
  1084. }
  1085. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1086. struct skd_sg_io *sksgio)
  1087. {
  1088. struct sg_io_hdr *sgp = &sksgio->sg;
  1089. int i, acc;
  1090. if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1091. pr_debug("%s:%s:%d access sg failed %p\n",
  1092. skdev->name, __func__, __LINE__, sksgio->argp);
  1093. return -EFAULT;
  1094. }
  1095. if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1096. pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
  1097. skdev->name, __func__, __LINE__, sksgio->argp);
  1098. return -EFAULT;
  1099. }
  1100. if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
  1101. pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
  1102. skdev->name, __func__, __LINE__, sgp->interface_id);
  1103. return -EINVAL;
  1104. }
  1105. if (sgp->cmd_len > sizeof(sksgio->cdb)) {
  1106. pr_debug("%s:%s:%d cmd_len invalid %d\n",
  1107. skdev->name, __func__, __LINE__, sgp->cmd_len);
  1108. return -EINVAL;
  1109. }
  1110. if (sgp->iovec_count > 256) {
  1111. pr_debug("%s:%s:%d iovec_count invalid %d\n",
  1112. skdev->name, __func__, __LINE__, sgp->iovec_count);
  1113. return -EINVAL;
  1114. }
  1115. if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
  1116. pr_debug("%s:%s:%d dxfer_len invalid %d\n",
  1117. skdev->name, __func__, __LINE__, sgp->dxfer_len);
  1118. return -EINVAL;
  1119. }
  1120. switch (sgp->dxfer_direction) {
  1121. case SG_DXFER_NONE:
  1122. acc = -1;
  1123. break;
  1124. case SG_DXFER_TO_DEV:
  1125. acc = VERIFY_READ;
  1126. break;
  1127. case SG_DXFER_FROM_DEV:
  1128. case SG_DXFER_TO_FROM_DEV:
  1129. acc = VERIFY_WRITE;
  1130. break;
  1131. default:
  1132. pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
  1133. skdev->name, __func__, __LINE__, sgp->dxfer_direction);
  1134. return -EINVAL;
  1135. }
  1136. if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
  1137. pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
  1138. skdev->name, __func__, __LINE__, sgp->cmdp);
  1139. return -EFAULT;
  1140. }
  1141. if (sgp->mx_sb_len != 0) {
  1142. if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
  1143. pr_debug("%s:%s:%d access sbp failed %p\n",
  1144. skdev->name, __func__, __LINE__, sgp->sbp);
  1145. return -EFAULT;
  1146. }
  1147. }
  1148. if (sgp->iovec_count == 0) {
  1149. sksgio->iov[0].iov_base = sgp->dxferp;
  1150. sksgio->iov[0].iov_len = sgp->dxfer_len;
  1151. sksgio->iovcnt = 1;
  1152. sksgio->dxfer_len = sgp->dxfer_len;
  1153. } else {
  1154. struct sg_iovec *iov;
  1155. uint nbytes = sizeof(*iov) * sgp->iovec_count;
  1156. size_t iov_data_len;
  1157. iov = kmalloc(nbytes, GFP_KERNEL);
  1158. if (iov == NULL) {
  1159. pr_debug("%s:%s:%d alloc iovec failed %d\n",
  1160. skdev->name, __func__, __LINE__,
  1161. sgp->iovec_count);
  1162. return -ENOMEM;
  1163. }
  1164. sksgio->iov = iov;
  1165. sksgio->iovcnt = sgp->iovec_count;
  1166. if (copy_from_user(iov, sgp->dxferp, nbytes)) {
  1167. pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
  1168. skdev->name, __func__, __LINE__, sgp->dxferp);
  1169. return -EFAULT;
  1170. }
  1171. /*
  1172. * Sum up the vecs, making sure they don't overflow
  1173. */
  1174. iov_data_len = 0;
  1175. for (i = 0; i < sgp->iovec_count; i++) {
  1176. if (iov_data_len + iov[i].iov_len < iov_data_len)
  1177. return -EINVAL;
  1178. iov_data_len += iov[i].iov_len;
  1179. }
  1180. /* SG_IO howto says that the shorter of the two wins */
  1181. if (sgp->dxfer_len < iov_data_len) {
  1182. sksgio->iovcnt = iov_shorten((struct iovec *)iov,
  1183. sgp->iovec_count,
  1184. sgp->dxfer_len);
  1185. sksgio->dxfer_len = sgp->dxfer_len;
  1186. } else
  1187. sksgio->dxfer_len = iov_data_len;
  1188. }
  1189. if (sgp->dxfer_direction != SG_DXFER_NONE) {
  1190. struct sg_iovec *iov = sksgio->iov;
  1191. for (i = 0; i < sksgio->iovcnt; i++, iov++) {
  1192. if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
  1193. pr_debug("%s:%s:%d access data failed %p/%d\n",
  1194. skdev->name, __func__, __LINE__,
  1195. iov->iov_base, (int)iov->iov_len);
  1196. return -EFAULT;
  1197. }
  1198. }
  1199. }
  1200. return 0;
  1201. }
  1202. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1203. struct skd_sg_io *sksgio)
  1204. {
  1205. struct skd_special_context *skspcl = NULL;
  1206. int rc;
  1207. for (;;) {
  1208. ulong flags;
  1209. spin_lock_irqsave(&skdev->lock, flags);
  1210. skspcl = skdev->skspcl_free_list;
  1211. if (skspcl != NULL) {
  1212. skdev->skspcl_free_list =
  1213. (struct skd_special_context *)skspcl->req.next;
  1214. skspcl->req.id += SKD_ID_INCR;
  1215. skspcl->req.state = SKD_REQ_STATE_SETUP;
  1216. skspcl->orphaned = 0;
  1217. skspcl->req.n_sg = 0;
  1218. }
  1219. spin_unlock_irqrestore(&skdev->lock, flags);
  1220. if (skspcl != NULL) {
  1221. rc = 0;
  1222. break;
  1223. }
  1224. pr_debug("%s:%s:%d blocking\n",
  1225. skdev->name, __func__, __LINE__);
  1226. rc = wait_event_interruptible_timeout(
  1227. skdev->waitq,
  1228. (skdev->skspcl_free_list != NULL),
  1229. msecs_to_jiffies(sksgio->sg.timeout));
  1230. pr_debug("%s:%s:%d unblocking, rc=%d\n",
  1231. skdev->name, __func__, __LINE__, rc);
  1232. if (rc <= 0) {
  1233. if (rc == 0)
  1234. rc = -ETIMEDOUT;
  1235. else
  1236. rc = -EINTR;
  1237. break;
  1238. }
  1239. /*
  1240. * If we get here rc > 0 meaning the timeout to
  1241. * wait_event_interruptible_timeout() had time left, hence the
  1242. * sought event -- non-empty free list -- happened.
  1243. * Retry the allocation.
  1244. */
  1245. }
  1246. sksgio->skspcl = skspcl;
  1247. return rc;
  1248. }
  1249. static int skd_skreq_prep_buffering(struct skd_device *skdev,
  1250. struct skd_request_context *skreq,
  1251. u32 dxfer_len)
  1252. {
  1253. u32 resid = dxfer_len;
  1254. /*
  1255. * The DMA engine must have aligned addresses and byte counts.
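* Round the transfer length up to the next multiple of 4 (e.g. 10 -> 12).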
  1256. */
  1257. resid += (-resid) & 3;
  1258. skreq->sg_byte_count = resid;
  1259. skreq->n_sg = 0;
  1260. while (resid > 0) {
  1261. u32 nbytes = PAGE_SIZE;
  1262. u32 ix = skreq->n_sg;
  1263. struct scatterlist *sg = &skreq->sg[ix];
  1264. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1265. struct page *page;
  1266. if (nbytes > resid)
  1267. nbytes = resid;
  1268. page = alloc_page(GFP_KERNEL);
  1269. if (page == NULL)
  1270. return -ENOMEM;
  1271. sg_set_page(sg, page, nbytes, 0);
  1272. /* TODO: This should be going through a pci_???()
  1273. * routine to do proper mapping. */
  1274. sksg->control = FIT_SGD_CONTROL_NOT_LAST;
  1275. sksg->byte_count = nbytes;
  1276. sksg->host_side_addr = sg_phys(sg);
  1277. sksg->dev_side_addr = 0;
  1278. sksg->next_desc_ptr = skreq->sksg_dma_address +
  1279. (ix + 1) * sizeof(*sksg);
  1280. skreq->n_sg++;
  1281. resid -= nbytes;
  1282. }
  1283. if (skreq->n_sg > 0) {
  1284. u32 ix = skreq->n_sg - 1;
  1285. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1286. sksg->control = FIT_SGD_CONTROL_LAST;
  1287. sksg->next_desc_ptr = 0;
  1288. }
  1289. if (unlikely(skdev->dbg_level > 1)) {
  1290. u32 i;
  1291. pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
  1292. skdev->name, __func__, __LINE__,
  1293. skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
  1294. for (i = 0; i < skreq->n_sg; i++) {
  1295. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  1296. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1297. "addr=0x%llx next=0x%llx\n",
  1298. skdev->name, __func__, __LINE__,
  1299. i, sgd->byte_count, sgd->control,
  1300. sgd->host_side_addr, sgd->next_desc_ptr);
  1301. }
  1302. }
  1303. return 0;
  1304. }
  1305. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1306. struct skd_sg_io *sksgio)
  1307. {
  1308. struct skd_special_context *skspcl = sksgio->skspcl;
  1309. struct skd_request_context *skreq = &skspcl->req;
  1310. u32 dxfer_len = sksgio->dxfer_len;
  1311. int rc;
  1312. rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
  1313. /*
  1314. * Eventually, errors or not, skd_release_special() is called
  1315. * to recover allocations including partial allocations.
  1316. */
  1317. return rc;
  1318. }
  1319. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1320. struct skd_sg_io *sksgio, int dxfer_dir)
  1321. {
  1322. struct skd_special_context *skspcl = sksgio->skspcl;
  1323. u32 iov_ix = 0;
  1324. struct sg_iovec curiov;
  1325. u32 sksg_ix = 0;
  1326. u8 *bufp = NULL;
  1327. u32 buf_len = 0;
  1328. u32 resid = sksgio->dxfer_len;
  1329. int rc;
  1330. curiov.iov_len = 0;
  1331. curiov.iov_base = NULL;
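/*
 * Skip this pass unless it matches the user's transfer direction.
 * For SG_DXFER_TO_FROM_DEV only the to-device pass is performed here.
 */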
  1332. if (dxfer_dir != sksgio->sg.dxfer_direction) {
  1333. if (dxfer_dir != SG_DXFER_TO_DEV ||
  1334. sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
  1335. return 0;
  1336. }
  1337. while (resid > 0) {
  1338. u32 nbytes = PAGE_SIZE;
  1339. if (curiov.iov_len == 0) {
  1340. curiov = sksgio->iov[iov_ix++];
  1341. continue;
  1342. }
  1343. if (buf_len == 0) {
  1344. struct page *page;
  1345. page = sg_page(&skspcl->req.sg[sksg_ix++]);
  1346. bufp = page_address(page);
  1347. buf_len = PAGE_SIZE;
  1348. }
  1349. nbytes = min_t(u32, nbytes, resid);
  1350. nbytes = min_t(u32, nbytes, curiov.iov_len);
  1351. nbytes = min_t(u32, nbytes, buf_len);
  1352. if (dxfer_dir == SG_DXFER_TO_DEV)
  1353. rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
  1354. else
  1355. rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
  1356. if (rc)
  1357. return -EFAULT;
  1358. resid -= nbytes;
  1359. curiov.iov_len -= nbytes;
  1360. curiov.iov_base += nbytes;
  1361. buf_len -= nbytes;
  1362. }
  1363. return 0;
  1364. }
  1365. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1366. struct skd_sg_io *sksgio)
  1367. {
  1368. struct skd_special_context *skspcl = sksgio->skspcl;
  1369. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  1370. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  1371. memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
  1372. /* Initialize the FIT msg header */
  1373. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1374. fmh->num_protocol_cmds_coalesced = 1;
  1375. /* Initialize the SCSI request */
  1376. if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
  1377. scsi_req->hdr.sg_list_dma_address =
  1378. cpu_to_be64(skspcl->req.sksg_dma_address);
  1379. scsi_req->hdr.tag = skspcl->req.id;
  1380. scsi_req->hdr.sg_list_len_bytes =
  1381. cpu_to_be32(skspcl->req.sg_byte_count);
  1382. memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
  1383. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1384. skd_send_special_fitmsg(skdev, skspcl);
  1385. return 0;
  1386. }
  1387. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
  1388. {
  1389. unsigned long flags;
  1390. int rc;
  1391. rc = wait_event_interruptible_timeout(skdev->waitq,
  1392. (sksgio->skspcl->req.state !=
  1393. SKD_REQ_STATE_BUSY),
  1394. msecs_to_jiffies(sksgio->sg.
  1395. timeout));
  1396. spin_lock_irqsave(&skdev->lock, flags);
  1397. if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
  1398. pr_debug("%s:%s:%d skspcl %p aborted\n",
  1399. skdev->name, __func__, __LINE__, sksgio->skspcl);
  1400. /* Build check cond, sense and let command finish. */
  1401. /* For a timeout, we must fabricate completion and sense
  1402. * data to complete the command */
  1403. sksgio->skspcl->req.completion.status =
  1404. SAM_STAT_CHECK_CONDITION;
  1405. memset(&sksgio->skspcl->req.err_info, 0,
  1406. sizeof(sksgio->skspcl->req.err_info));
  1407. sksgio->skspcl->req.err_info.type = 0x70;
  1408. sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
  1409. sksgio->skspcl->req.err_info.code = 0x44;
  1410. sksgio->skspcl->req.err_info.qual = 0;
  1411. rc = 0;
  1412. } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
  1413. /* No longer on the adapter. We finish. */
  1414. rc = 0;
  1415. else {
  1416. /* Something's gone wrong. Still busy. Timeout or
  1417. * user interrupted (control-C). Mark as an orphan
  1418. * so it will be disposed of when it completes. */
  1419. sksgio->skspcl->orphaned = 1;
  1420. sksgio->skspcl = NULL;
  1421. if (rc == 0) {
  1422. pr_debug("%s:%s:%d timed out %p (%u ms)\n",
  1423. skdev->name, __func__, __LINE__,
  1424. sksgio, sksgio->sg.timeout);
  1425. rc = -ETIMEDOUT;
  1426. } else {
  1427. pr_debug("%s:%s:%d cntlc %p\n",
  1428. skdev->name, __func__, __LINE__, sksgio);
  1429. rc = -EINTR;
  1430. }
  1431. }
  1432. spin_unlock_irqrestore(&skdev->lock, flags);
  1433. return rc;
  1434. }
  1435. static int skd_sg_io_put_status(struct skd_device *skdev,
  1436. struct skd_sg_io *sksgio)
  1437. {
  1438. struct sg_io_hdr *sgp = &sksgio->sg;
  1439. struct skd_special_context *skspcl = sksgio->skspcl;
  1440. int resid = 0;
  1441. u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
  1442. sgp->status = skspcl->req.completion.status;
  1443. resid = sksgio->dxfer_len - nb;
  1444. sgp->masked_status = sgp->status & STATUS_MASK;
  1445. sgp->msg_status = 0;
  1446. sgp->host_status = 0;
  1447. sgp->driver_status = 0;
  1448. sgp->resid = resid;
  1449. if (sgp->masked_status || sgp->host_status || sgp->driver_status)
  1450. sgp->info |= SG_INFO_CHECK;
  1451. pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
  1452. skdev->name, __func__, __LINE__,
  1453. sgp->status, sgp->masked_status, sgp->resid);
  1454. if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
  1455. if (sgp->mx_sb_len > 0) {
  1456. struct fit_comp_error_info *ei = &skspcl->req.err_info;
  1457. u32 nbytes = sizeof(*ei);
  1458. nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
  1459. sgp->sb_len_wr = nbytes;
  1460. if (__copy_to_user(sgp->sbp, ei, nbytes)) {
  1461. pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
  1462. skdev->name, __func__, __LINE__,
  1463. sgp->sbp);
  1464. return -EFAULT;
  1465. }
  1466. }
  1467. }
  1468. if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
  1469. pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
  1470. skdev->name, __func__, __LINE__, sksgio->argp);
  1471. return -EFAULT;
  1472. }
  1473. return 0;
  1474. }
  1475. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1476. struct skd_sg_io *sksgio)
  1477. {
  1478. struct skd_special_context *skspcl = sksgio->skspcl;
  1479. if (skspcl != NULL) {
  1480. ulong flags;
  1481. sksgio->skspcl = NULL;
  1482. spin_lock_irqsave(&skdev->lock, flags);
  1483. skd_release_special(skdev, skspcl);
  1484. spin_unlock_irqrestore(&skdev->lock, flags);
  1485. }
  1486. return 0;
  1487. }
  1488. /*
  1489. *****************************************************************************
  1490. * INTERNAL REQUESTS -- generated by driver itself
  1491. *****************************************************************************
  1492. */
  1493. static int skd_format_internal_skspcl(struct skd_device *skdev)
  1494. {
  1495. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1496. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1497. struct fit_msg_hdr *fmh;
  1498. uint64_t dma_address;
  1499. struct skd_scsi_request *scsi;
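/*
 * The special message buffer uses the standard special layout:
 * a 64-byte FIT header followed by a single 64-byte SSDI command.
 */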
  1500. fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
  1501. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1502. fmh->num_protocol_cmds_coalesced = 1;
  1503. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1504. memset(scsi, 0, sizeof(*scsi));
  1505. dma_address = skspcl->req.sksg_dma_address;
  1506. scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
  1507. sgd->control = FIT_SGD_CONTROL_LAST;
  1508. sgd->byte_count = 0;
  1509. sgd->host_side_addr = skspcl->db_dma_address;
  1510. sgd->dev_side_addr = 0;
  1511. sgd->next_desc_ptr = 0LL;
  1512. return 1;
  1513. }
  1514. #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
  1515. static void skd_send_internal_skspcl(struct skd_device *skdev,
  1516. struct skd_special_context *skspcl,
  1517. u8 opcode)
  1518. {
  1519. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1520. struct skd_scsi_request *scsi;
  1521. unsigned char *buf = skspcl->data_buf;
  1522. int i;
  1523. if (skspcl->req.state != SKD_REQ_STATE_IDLE)
  1524. /*
  1525. * A refresh is already in progress.
  1526. * Just wait for it to finish.
  1527. */
  1528. return;
  1529. SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
  1530. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1531. skspcl->req.id += SKD_ID_INCR;
  1532. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1533. scsi->hdr.tag = skspcl->req.id;
  1534. memset(scsi->cdb, 0, sizeof(scsi->cdb));
  1535. switch (opcode) {
  1536. case TEST_UNIT_READY:
  1537. scsi->cdb[0] = TEST_UNIT_READY;
  1538. sgd->byte_count = 0;
  1539. scsi->hdr.sg_list_len_bytes = 0;
  1540. break;
  1541. case READ_CAPACITY:
  1542. scsi->cdb[0] = READ_CAPACITY;
  1543. sgd->byte_count = SKD_N_READ_CAP_BYTES;
  1544. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1545. break;
  1546. case INQUIRY:
  1547. scsi->cdb[0] = INQUIRY;
  1548. scsi->cdb[1] = 0x01; /* evpd */
  1549. scsi->cdb[2] = 0x80; /* serial number page */
  1550. scsi->cdb[4] = 0x10;
  1551. sgd->byte_count = 16;
  1552. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1553. break;
  1554. case SYNCHRONIZE_CACHE:
  1555. scsi->cdb[0] = SYNCHRONIZE_CACHE;
  1556. sgd->byte_count = 0;
  1557. scsi->hdr.sg_list_len_bytes = 0;
  1558. break;
  1559. case WRITE_BUFFER:
  1560. scsi->cdb[0] = WRITE_BUFFER;
  1561. scsi->cdb[1] = 0x02;
  1562. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1563. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1564. sgd->byte_count = WR_BUF_SIZE;
  1565. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1566. /* fill incrementing byte pattern */
  1567. for (i = 0; i < sgd->byte_count; i++)
  1568. buf[i] = i & 0xFF;
  1569. break;
  1570. case READ_BUFFER:
  1571. scsi->cdb[0] = READ_BUFFER;
  1572. scsi->cdb[1] = 0x02;
  1573. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1574. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1575. sgd->byte_count = WR_BUF_SIZE;
  1576. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1577. memset(skspcl->data_buf, 0, sgd->byte_count);
  1578. break;
  1579. default:
  1580. SKD_ASSERT("Don't know what to send");
  1581. return;
  1582. }
  1583. skd_send_special_fitmsg(skdev, skspcl);
  1584. }
  1585. static void skd_refresh_device_data(struct skd_device *skdev)
  1586. {
  1587. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1588. skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
  1589. }
  1590. static int skd_chk_read_buf(struct skd_device *skdev,
  1591. struct skd_special_context *skspcl)
  1592. {
  1593. unsigned char *buf = skspcl->data_buf;
  1594. int i;
  1595. /* check for incrementing byte pattern */
  1596. for (i = 0; i < WR_BUF_SIZE; i++)
  1597. if (buf[i] != (i & 0xFF))
  1598. return 1;
  1599. return 0;
  1600. }
  1601. static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
  1602. u8 code, u8 qual, u8 fruc)
  1603. {
  1604. /* If the check condition is of special interest, log a message */
  1605. if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
  1606. && (code == 0x04) && (qual == 0x06)) {
  1607. pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
  1608. "ascq/fruc %02x/%02x/%02x/%02x\n",
  1609. skd_name(skdev), key, code, qual, fruc);
  1610. }
  1611. }
  1612. static void skd_complete_internal(struct skd_device *skdev,
  1613. volatile struct fit_completion_entry_v1
  1614. *skcomp,
  1615. volatile struct fit_comp_error_info *skerr,
  1616. struct skd_special_context *skspcl)
  1617. {
  1618. u8 *buf = skspcl->data_buf;
  1619. u8 status;
  1620. int i;
  1621. struct skd_scsi_request *scsi =
  1622. (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1623. SKD_ASSERT(skspcl == &skdev->internal_skspcl);
  1624. pr_debug("%s:%s:%d complete internal %x\n",
  1625. skdev->name, __func__, __LINE__, scsi->cdb[0]);
  1626. skspcl->req.completion = *skcomp;
  1627. skspcl->req.state = SKD_REQ_STATE_IDLE;
  1628. skspcl->req.id += SKD_ID_INCR;
  1629. status = skspcl->req.completion.status;
  1630. skd_log_check_status(skdev, status, skerr->key, skerr->code,
  1631. skerr->qual, skerr->fruc);
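/*
 * The internal commands form a bring-up chain; each good completion
 * below issues the next step:
 * TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER -> READ_CAPACITY -> INQUIRY
 */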
  1632. switch (scsi->cdb[0]) {
  1633. case TEST_UNIT_READY:
  1634. if (status == SAM_STAT_GOOD)
  1635. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1636. else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1637. (skerr->key == MEDIUM_ERROR))
  1638. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1639. else {
  1640. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1641. pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
  1642. skdev->name, __func__, __LINE__,
  1643. skdev->state);
  1644. return;
  1645. }
  1646. pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
  1647. skdev->name, __func__, __LINE__);
  1648. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1649. }
  1650. break;
  1651. case WRITE_BUFFER:
  1652. if (status == SAM_STAT_GOOD)
  1653. skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
  1654. else {
  1655. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1656. pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
  1657. skdev->name, __func__, __LINE__,
  1658. skdev->state);
  1659. return;
  1660. }
  1661. pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
  1662. skdev->name, __func__, __LINE__);
  1663. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1664. }
  1665. break;
  1666. case READ_BUFFER:
  1667. if (status == SAM_STAT_GOOD) {
  1668. if (skd_chk_read_buf(skdev, skspcl) == 0)
  1669. skd_send_internal_skspcl(skdev, skspcl,
  1670. READ_CAPACITY);
  1671. else {
  1672. pr_err(
  1673. "(%s):*** W/R Buffer mismatch %d ***\n",
  1674. skd_name(skdev), skdev->connect_retries);
  1675. if (skdev->connect_retries <
  1676. SKD_MAX_CONNECT_RETRIES) {
  1677. skdev->connect_retries++;
  1678. skd_soft_reset(skdev);
  1679. } else {
  1680. pr_err(
  1681. "(%s): W/R Buffer Connect Error\n",
  1682. skd_name(skdev));
  1683. return;
  1684. }
  1685. }
  1686. } else {
  1687. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1688. pr_debug("%s:%s:%d "
  1689. "read buffer failed, don't send anymore state 0x%x\n",
  1690. skdev->name, __func__, __LINE__,
  1691. skdev->state);
  1692. return;
  1693. }
  1694. pr_debug("%s:%s:%d "
  1695. "**** read buffer failed, retry skerr\n",
  1696. skdev->name, __func__, __LINE__);
  1697. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1698. }
  1699. break;
  1700. case READ_CAPACITY:
  1701. skdev->read_cap_is_valid = 0;
  1702. if (status == SAM_STAT_GOOD) {
  1703. skdev->read_cap_last_lba =
  1704. (buf[0] << 24) | (buf[1] << 16) |
  1705. (buf[2] << 8) | buf[3];
  1706. skdev->read_cap_blocksize =
  1707. (buf[4] << 24) | (buf[5] << 16) |
  1708. (buf[6] << 8) | buf[7];
  1709. pr_debug("%s:%s:%d last lba %d, bs %d\n",
  1710. skdev->name, __func__, __LINE__,
  1711. skdev->read_cap_last_lba,
  1712. skdev->read_cap_blocksize);
  1713. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1714. skdev->read_cap_is_valid = 1;
  1715. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1716. } else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1717. (skerr->key == MEDIUM_ERROR)) {
  1718. skdev->read_cap_last_lba = ~0;
  1719. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1720. pr_debug("%s:%s:%d "
  1721. "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
  1722. skdev->name, __func__, __LINE__);
  1723. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1724. } else {
  1725. pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
  1726. skdev->name, __func__, __LINE__);
  1727. skd_send_internal_skspcl(skdev, skspcl,
  1728. TEST_UNIT_READY);
  1729. }
  1730. break;
  1731. case INQUIRY:
  1732. skdev->inquiry_is_valid = 0;
  1733. if (status == SAM_STAT_GOOD) {
  1734. skdev->inquiry_is_valid = 1;
  1735. for (i = 0; i < 12; i++)
  1736. skdev->inq_serial_num[i] = buf[i + 4];
  1737. skdev->inq_serial_num[12] = 0;
  1738. }
  1739. if (skd_unquiesce_dev(skdev) < 0)
  1740. pr_debug("%s:%s:%d **** failed to ONLINE device\n",
  1741. skdev->name, __func__, __LINE__);
  1742. /* connection is complete */
  1743. skdev->connect_retries = 0;
  1744. break;
  1745. case SYNCHRONIZE_CACHE:
  1746. if (status == SAM_STAT_GOOD)
  1747. skdev->sync_done = 1;
  1748. else
  1749. skdev->sync_done = -1;
  1750. wake_up_interruptible(&skdev->waitq);
  1751. break;
  1752. default:
  1753. SKD_ASSERT("we didn't send this");
  1754. }
  1755. }
  1756. /*
  1757. *****************************************************************************
  1758. * FIT MESSAGES
  1759. *****************************************************************************
  1760. */
  1761. static void skd_send_fitmsg(struct skd_device *skdev,
  1762. struct skd_fitmsg_context *skmsg)
  1763. {
  1764. u64 qcmd;
  1765. struct fit_msg_hdr *fmh;
  1766. pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
  1767. skdev->name, __func__, __LINE__,
  1768. skmsg->mb_dma_address, skdev->in_flight);
  1769. pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
  1770. skdev->name, __func__, __LINE__,
  1771. skmsg->msg_buf, skmsg->offset);
  1772. qcmd = skmsg->mb_dma_address;
  1773. qcmd |= FIT_QCMD_QID_NORMAL;
  1774. fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
  1775. skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
  1776. if (unlikely(skdev->dbg_level > 1)) {
  1777. u8 *bp = (u8 *)skmsg->msg_buf;
  1778. int i;
  1779. for (i = 0; i < skmsg->length; i += 8) {
  1780. pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
  1781. "%02x %02x %02x %02x\n",
  1782. skdev->name, __func__, __LINE__,
  1783. i, bp[i + 0], bp[i + 1], bp[i + 2],
  1784. bp[i + 3], bp[i + 4], bp[i + 5],
  1785. bp[i + 6], bp[i + 7]);
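/* Dump only the first 8 header bytes, then continue at offset 64 where the commands start. */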
  1786. if (i == 0)
  1787. i = 64 - 8;
  1788. }
  1789. }
  1790. if (skmsg->length > 256)
  1791. qcmd |= FIT_QCMD_MSGSIZE_512;
  1792. else if (skmsg->length > 128)
  1793. qcmd |= FIT_QCMD_MSGSIZE_256;
  1794. else if (skmsg->length > 64)
  1795. qcmd |= FIT_QCMD_MSGSIZE_128;
  1796. else
  1797. /*
  1798. * This makes no sense because the FIT msg header is
  1799. * 64 bytes. If the msg is only 64 bytes long it has
  1800. * no payload.
  1801. */
  1802. qcmd |= FIT_QCMD_MSGSIZE_64;
  1803. /* Make sure skd_msg_buf is written before the doorbell is triggered. */
  1804. smp_wmb();
  1805. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1806. }
  1807. static void skd_send_special_fitmsg(struct skd_device *skdev,
  1808. struct skd_special_context *skspcl)
  1809. {
  1810. u64 qcmd;
  1811. if (unlikely(skdev->dbg_level > 1)) {
  1812. u8 *bp = (u8 *)skspcl->msg_buf;
  1813. int i;
  1814. for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
  1815. pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
  1816. "%02x %02x %02x %02x\n",
  1817. skdev->name, __func__, __LINE__, i,
  1818. bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
  1819. bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
  1820. if (i == 0)
  1821. i = 64 - 8;
  1822. }
  1823. pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
  1824. skdev->name, __func__, __LINE__,
  1825. skspcl, skspcl->req.id, skspcl->req.sksg_list,
  1826. skspcl->req.sksg_dma_address);
  1827. for (i = 0; i < skspcl->req.n_sg; i++) {
  1828. struct fit_sg_descriptor *sgd =
  1829. &skspcl->req.sksg_list[i];
  1830. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1831. "addr=0x%llx next=0x%llx\n",
  1832. skdev->name, __func__, __LINE__,
  1833. i, sgd->byte_count, sgd->control,
  1834. sgd->host_side_addr, sgd->next_desc_ptr);
  1835. }
  1836. }
  1837. /*
  1838. * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
  1839. * and one 64-byte SSDI command.
  1840. */
  1841. qcmd = skspcl->mb_dma_address;
  1842. qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
  1843. /* Make sure skd_msg_buf is written before the doorbell is triggered. */
  1844. smp_wmb();
  1845. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1846. }
  1847. /*
  1848. *****************************************************************************
  1849. * COMPLETION QUEUE
  1850. *****************************************************************************
  1851. */
  1852. static void skd_complete_other(struct skd_device *skdev,
  1853. volatile struct fit_completion_entry_v1 *skcomp,
  1854. volatile struct fit_comp_error_info *skerr);
  1855. struct sns_info {
  1856. u8 type;
  1857. u8 stat;
  1858. u8 key;
  1859. u8 asc;
  1860. u8 ascq;
  1861. u8 mask;
  1862. enum skd_check_status_action action;
  1863. };
  1864. static struct sns_info skd_chkstat_table[] = {
  1865. /* Good */
  1866. { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
  1867. SKD_CHECK_STATUS_REPORT_GOOD },
  1868. /* Smart alerts */
  1869. { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
  1870. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1871. { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
  1872. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1873. { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
  1874. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1875. /* Retry (with limits) */
  1876. { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
  1877. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1878. { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
  1879. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1880. { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
  1881. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1882. { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
  1883. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1884. /* Busy (or about to be) */
  1885. { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
  1886. SKD_CHECK_STATUS_BUSY_IMMINENT },
  1887. };
  1888. /*
  1889. * Look up status and sense data to decide how to handle the error
  1890. * from the device.
  1891. * mask says which fields must match e.g., mask=0x18 means check
  1892. * type and stat, ignore key, asc, ascq.
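* Mask bit layout, matching the checks below: 0x10 = type, 0x08 = stat,
* 0x04 = key, 0x02 = asc, 0x01 = ascq.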
  1893. */
  1894. static enum skd_check_status_action
  1895. skd_check_status(struct skd_device *skdev,
  1896. u8 cmp_status, volatile struct fit_comp_error_info *skerr)
  1897. {
  1898. int i, n;
  1899. pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
  1900. skd_name(skdev), skerr->key, skerr->code, skerr->qual,
  1901. skerr->fruc);
  1902. pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
  1903. skdev->name, __func__, __LINE__, skerr->type, cmp_status,
  1904. skerr->key, skerr->code, skerr->qual, skerr->fruc);
  1905. /* Does the info match an entry in the table? */
  1906. n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
  1907. for (i = 0; i < n; i++) {
  1908. struct sns_info *sns = &skd_chkstat_table[i];
  1909. if (sns->mask & 0x10)
  1910. if (skerr->type != sns->type)
  1911. continue;
  1912. if (sns->mask & 0x08)
  1913. if (cmp_status != sns->stat)
  1914. continue;
  1915. if (sns->mask & 0x04)
  1916. if (skerr->key != sns->key)
  1917. continue;
  1918. if (sns->mask & 0x02)
  1919. if (skerr->code != sns->asc)
  1920. continue;
  1921. if (sns->mask & 0x01)
  1922. if (skerr->qual != sns->ascq)
  1923. continue;
  1924. if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
  1925. pr_err("(%s): SMART Alert: sense key/asc/ascq "
  1926. "%02x/%02x/%02x\n",
  1927. skd_name(skdev), skerr->key,
  1928. skerr->code, skerr->qual);
  1929. }
  1930. return sns->action;
  1931. }
  1932. /* No other match, so nonzero status means error,
  1933. * zero status means good
  1934. */
  1935. if (cmp_status) {
  1936. pr_debug("%s:%s:%d status check: error\n",
  1937. skdev->name, __func__, __LINE__);
  1938. return SKD_CHECK_STATUS_REPORT_ERROR;
  1939. }
  1940. pr_debug("%s:%s:%d status check good default\n",
  1941. skdev->name, __func__, __LINE__);
  1942. return SKD_CHECK_STATUS_REPORT_GOOD;
  1943. }
  1944. static void skd_resolve_req_exception(struct skd_device *skdev,
  1945. struct skd_request_context *skreq)
  1946. {
  1947. u8 cmp_status = skreq->completion.status;
  1948. switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
  1949. case SKD_CHECK_STATUS_REPORT_GOOD:
  1950. case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
  1951. skd_end_request(skdev, skreq, 0);
  1952. break;
  1953. case SKD_CHECK_STATUS_BUSY_IMMINENT:
  1954. skd_log_skreq(skdev, skreq, "retry(busy)");
  1955. blk_requeue_request(skdev->queue, skreq->req);
  1956. pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
  1957. skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
  1958. skdev->timer_countdown = SKD_TIMER_MINUTES(20);
  1959. skd_quiesce_dev(skdev);
  1960. break;
  1961. case SKD_CHECK_STATUS_REQUEUE_REQUEST:
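/* The request's ->special field doubles as the per-request retry counter. */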
  1962. if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
  1963. skd_log_skreq(skdev, skreq, "retry");
  1964. blk_requeue_request(skdev->queue, skreq->req);
  1965. break;
  1966. }
  1967. /* fall through to report error */
  1968. case SKD_CHECK_STATUS_REPORT_ERROR:
  1969. default:
  1970. skd_end_request(skdev, skreq, -EIO);
  1971. break;
  1972. }
  1973. }
  1974. /* assume spinlock is already held */
  1975. static void skd_release_skreq(struct skd_device *skdev,
  1976. struct skd_request_context *skreq)
  1977. {
  1978. u32 msg_slot;
  1979. struct skd_fitmsg_context *skmsg;
  1980. u32 timo_slot;
  1981. /*
  1982. * Reclaim the FIT msg buffer if this is
  1983. * the first of the requests it carried to
  1984. * be completed. The FIT msg buffer used to
  1985. * send this request cannot be reused until
  1986. * we are sure the s1120 card has copied
  1987. * it to its memory. The FIT msg might have
  1988. * contained several requests. As soon as
  1989. * any of them are completed we know that
  1990. * the entire FIT msg was transferred.
  1991. * Only the first completed request will
  1992. * match the FIT msg buffer id. The FIT
  1993. * msg buffer id is immediately updated.
  1994. * When subsequent requests complete the FIT
  1995. * msg buffer id won't match, so we know
  1996. * quite cheaply that it is already done.
  1997. */
  1998. msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
  1999. SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
  2000. skmsg = &skdev->skmsg_table[msg_slot];
  2001. if (skmsg->id == skreq->fitmsg_id) {
  2002. SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
  2003. SKD_ASSERT(skmsg->outstanding > 0);
  2004. skmsg->outstanding--;
  2005. if (skmsg->outstanding == 0) {
  2006. skmsg->state = SKD_MSG_STATE_IDLE;
  2007. skmsg->id += SKD_ID_INCR;
  2008. skmsg->next = skdev->skmsg_free_list;
  2009. skdev->skmsg_free_list = skmsg;
  2010. }
  2011. }
  2012. /*
  2013. * Decrease the number of active requests.
  2014. * Also decrements the count in the timeout slot.
  2015. */
  2016. SKD_ASSERT(skdev->in_flight > 0);
  2017. skdev->in_flight -= 1;
  2018. timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  2019. SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
  2020. skdev->timeout_slot[timo_slot] -= 1;
  2021. /*
  2022. * Reset backpointer
  2023. */
  2024. skreq->req = NULL;
  2025. /*
  2026. * Reclaim the skd_request_context
  2027. */
  2028. skreq->state = SKD_REQ_STATE_IDLE;
  2029. skreq->id += SKD_ID_INCR;
  2030. skreq->next = skdev->skreq_free_list;
  2031. skdev->skreq_free_list = skreq;
  2032. }
  2033. #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
  2034. static void skd_do_inq_page_00(struct skd_device *skdev,
  2035. volatile struct fit_completion_entry_v1 *skcomp,
  2036. volatile struct fit_comp_error_info *skerr,
  2037. uint8_t *cdb, uint8_t *buf)
  2038. {
  2039. uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
  2040. /* Caller requested "supported pages". The driver needs to insert
  2041. * its page.
  2042. */
  2043. pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
  2044. skdev->name, __func__, __LINE__);
  2045. /* If the device rejected the request because the CDB was
  2046. * improperly formed, then just leave.
  2047. */
  2048. if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
  2049. skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
  2050. return;
  2051. /* Get the amount of space the caller allocated */
  2052. max_bytes = (cdb[3] << 8) | cdb[4];
  2053. /* Get the number of pages actually returned by the device */
  2054. drive_pages = (buf[2] << 8) | buf[3];
  2055. drive_bytes = drive_pages + 4;
  2056. new_size = drive_pages + 1;
  2057. /* Supported pages must be in numerical order, so find where
  2058. * the driver page needs to be inserted into the list of
  2059. * pages returned by the device.
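* For example, if the device reports pages 0x00, 0x80 and 0x83, the
* driver's page 0xDA is inserted after 0x83.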
  2060. */
  2061. for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
  2062. if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
  2063. return; /* Device already uses this page code; abort */
  2064. else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
  2065. break;
  2066. }
  2067. if (insert_pt < max_bytes) {
  2068. uint16_t u;
  2069. /* Shift everything up one byte to make room. */
  2070. for (u = new_size + 3; u > insert_pt; u--)
  2071. buf[u] = buf[u - 1];
  2072. buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
  2073. /* SCSI byte order increment of num_returned_bytes by 1 */
  2074. skcomp->num_returned_bytes =
  2075. be32_to_cpu(skcomp->num_returned_bytes) + 1;
  2076. skcomp->num_returned_bytes =
  2077. be32_to_cpu(skcomp->num_returned_bytes);
  2078. }
  2079. /* update page length field to reflect the driver's page too */
  2080. buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
  2081. buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
  2082. }
  2083. static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
  2084. {
  2085. int pcie_reg;
  2086. u16 pci_bus_speed;
  2087. u8 pci_lanes;
  2088. pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
  2089. if (pcie_reg) {
  2090. u16 linksta;
  2091. pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
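/* PCI_EXP_LNKSTA: bits 3:0 = current link speed, bits 9:4 = negotiated link width */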
  2092. pci_bus_speed = linksta & 0xF;
  2093. pci_lanes = (linksta & 0x3F0) >> 4;
  2094. } else {
  2095. *speed = STEC_LINK_UNKNOWN;
  2096. *width = 0xFF;
  2097. return;
  2098. }
  2099. switch (pci_bus_speed) {
  2100. case 1:
  2101. *speed = STEC_LINK_2_5GTS;
  2102. break;
  2103. case 2:
  2104. *speed = STEC_LINK_5GTS;
  2105. break;
  2106. case 3:
  2107. *speed = STEC_LINK_8GTS;
  2108. break;
  2109. default:
  2110. *speed = STEC_LINK_UNKNOWN;
  2111. break;
  2112. }
  2113. if (pci_lanes <= 0x20)
  2114. *width = pci_lanes;
  2115. else
  2116. *width = 0xFF;
  2117. }
  2118. static void skd_do_inq_page_da(struct skd_device *skdev,
  2119. volatile struct fit_completion_entry_v1 *skcomp,
  2120. volatile struct fit_comp_error_info *skerr,
  2121. uint8_t *cdb, uint8_t *buf)
  2122. {
  2123. struct pci_dev *pdev = skdev->pdev;
  2124. unsigned max_bytes;
  2125. struct driver_inquiry_data inq;
  2126. u16 val;
  2127. pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
  2128. skdev->name, __func__, __LINE__);
  2129. memset(&inq, 0, sizeof(inq));
  2130. inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
  2131. skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
  2132. inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
  2133. inq.pcie_device_number = PCI_SLOT(pdev->devfn);
  2134. inq.pcie_function_number = PCI_FUNC(pdev->devfn);
  2135. pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
  2136. inq.pcie_vendor_id = cpu_to_be16(val);
  2137. pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
  2138. inq.pcie_device_id = cpu_to_be16(val);
  2139. pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
  2140. inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
  2141. pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
  2142. inq.pcie_subsystem_device_id = cpu_to_be16(val);
  2143. /* Driver version, fixed length, padded with spaces on the right */
  2144. inq.driver_version_length = sizeof(inq.driver_version);
  2145. memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
  2146. memcpy(inq.driver_version, DRV_VER_COMPL,
  2147. min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
  2148. inq.page_length = cpu_to_be16((sizeof(inq) - 4));
  2149. /* Clear the error set by the device */
  2150. skcomp->status = SAM_STAT_GOOD;
  2151. memset((void *)skerr, 0, sizeof(*skerr));
  2152. /* copy response into output buffer */
  2153. max_bytes = (cdb[3] << 8) | cdb[4];
  2154. memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
  2155. skcomp->num_returned_bytes =
  2156. be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
  2157. }
  2158. static void skd_do_driver_inq(struct skd_device *skdev,
  2159. volatile struct fit_completion_entry_v1 *skcomp,
  2160. volatile struct fit_comp_error_info *skerr,
  2161. uint8_t *cdb, uint8_t *buf)
  2162. {
  2163. if (!buf)
  2164. return;
  2165. else if (cdb[0] != INQUIRY)
  2166. return; /* Not an INQUIRY */
  2167. else if ((cdb[1] & 1) == 0)
  2168. return; /* EVPD not set */
  2169. else if (cdb[2] == 0)
  2170. /* Need to add driver's page to supported pages list */
  2171. skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
  2172. else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
  2173. /* Caller requested driver's page */
  2174. skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
  2175. }
  2176. static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
  2177. {
  2178. if (!sg)
  2179. return NULL;
  2180. if (!sg_page(sg))
  2181. return NULL;
  2182. return sg_virt(sg);
  2183. }
  2184. static void skd_process_scsi_inq(struct skd_device *skdev,
  2185. volatile struct fit_completion_entry_v1
  2186. *skcomp,
  2187. volatile struct fit_comp_error_info *skerr,
  2188. struct skd_special_context *skspcl)
  2189. {
  2190. uint8_t *buf;
  2191. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  2192. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  2193. dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
  2194. skspcl->req.sg_data_dir);
  2195. buf = skd_sg_1st_page_ptr(skspcl->req.sg);
  2196. if (buf)
  2197. skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
  2198. }
  2199. static int skd_isr_completion_posted(struct skd_device *skdev,
  2200. int limit, int *enqueued)
  2201. {
  2202. volatile struct fit_completion_entry_v1 *skcmp = NULL;
  2203. volatile struct fit_comp_error_info *skerr;
  2204. u16 req_id;
  2205. u32 req_slot;
  2206. struct skd_request_context *skreq;
  2207. u16 cmp_cntxt = 0;
  2208. u8 cmp_status = 0;
  2209. u8 cmp_cycle = 0;
  2210. u32 cmp_bytes = 0;
  2211. int rc = 0;
  2212. int processed = 0;
  2213. for (;; ) {
  2214. SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
  2215. skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
  2216. cmp_cycle = skcmp->cycle;
  2217. cmp_cntxt = skcmp->tag;
  2218. cmp_status = skcmp->status;
  2219. cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
  2220. skerr = &skdev->skerr_table[skdev->skcomp_ix];
  2221. pr_debug("%s:%s:%d "
  2222. "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
  2223. "busy=%d rbytes=0x%x proto=%d\n",
  2224. skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
  2225. skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
  2226. skdev->in_flight, cmp_bytes, skdev->proto_ver);
  2227. if (cmp_cycle != skdev->skcomp_cycle) {
  2228. pr_debug("%s:%s:%d end of completions\n",
  2229. skdev->name, __func__, __LINE__);
  2230. break;
  2231. }
  2232. /*
  2233. * Update the completion queue head index and possibly
  2234. * the completion cycle count. 8-bit wrap-around.
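* An entry whose cycle value differs from skdev->skcomp_cycle has not
* been written by the device yet; that is how the check above detects
* the end of new completions.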
  2235. */
  2236. skdev->skcomp_ix++;
  2237. if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
  2238. skdev->skcomp_ix = 0;
  2239. skdev->skcomp_cycle++;
  2240. }
  2241. /*
  2242. * The command context is a unique 32-bit ID. The low order
  2243. * bits help locate the request. The request is usually a
  2244. * r/w request (see skd_start() above) or a special request.
  2245. */
  2246. req_id = cmp_cntxt;
  2247. req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
  2248. /* Is this other than a r/w request? */
  2249. if (req_slot >= skdev->num_req_context) {
  2250. /*
  2251. * This is not a completion for a r/w request.
  2252. */
  2253. skd_complete_other(skdev, skcmp, skerr);
  2254. continue;
  2255. }
  2256. skreq = &skdev->skreq_table[req_slot];
  2257. /*
  2258. * Make sure the request ID for the slot matches.
  2259. */
  2260. if (skreq->id != req_id) {
  2261. pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
  2262. skdev->name, __func__, __LINE__,
  2263. req_id, skreq->id);
  2264. {
  2265. u16 new_id = cmp_cntxt;
  2266. pr_err("(%s): Completion mismatch "
  2267. "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
  2268. skd_name(skdev), req_id,
  2269. skreq->id, new_id);
  2270. continue;
  2271. }
  2272. }
  2273. SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
  2274. if (skreq->state == SKD_REQ_STATE_ABORTED) {
  2275. pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
  2276. skdev->name, __func__, __LINE__,
  2277. skreq, skreq->id);
  2278. /* a previously timed out command can
  2279. * now be cleaned up */
  2280. skd_release_skreq(skdev, skreq);
  2281. continue;
  2282. }
  2283. skreq->completion = *skcmp;
  2284. if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
  2285. skreq->err_info = *skerr;
  2286. skd_log_check_status(skdev, cmp_status, skerr->key,
  2287. skerr->code, skerr->qual,
  2288. skerr->fruc);
  2289. }
  2290. /* Release DMA resources for the request. */
  2291. if (skreq->n_sg > 0)
  2292. skd_postop_sg_list(skdev, skreq);
  2293. if (!skreq->req) {
  2294. pr_debug("%s:%s:%d NULL backptr skdreq %p, "
  2295. "req=0x%x req_id=0x%x\n",
  2296. skdev->name, __func__, __LINE__,
  2297. skreq, skreq->id, req_id);
  2298. } else {
  2299. /*
  2300. * Capture the outcome and post it back to the
  2301. * native request.
  2302. */
  2303. if (likely(cmp_status == SAM_STAT_GOOD))
  2304. skd_end_request(skdev, skreq, 0);
  2305. else
  2306. skd_resolve_req_exception(skdev, skreq);
  2307. }
  2308. /*
  2309. * Release the skreq, its FIT msg (if one), timeout slot,
  2310. * and queue depth.
  2311. */
  2312. skd_release_skreq(skdev, skreq);
  2313. /* skd_isr_comp_limit equal to zero means no limit */
  2314. if (limit) {
  2315. if (++processed >= limit) {
  2316. rc = 1;
  2317. break;
  2318. }
  2319. }
  2320. }
  2321. if ((skdev->state == SKD_DRVR_STATE_PAUSING)
  2322. && (skdev->in_flight) == 0) {
  2323. skdev->state = SKD_DRVR_STATE_PAUSED;
  2324. wake_up_interruptible(&skdev->waitq);
  2325. }
  2326. return rc;
  2327. }
  2328. static void skd_complete_other(struct skd_device *skdev,
  2329. volatile struct fit_completion_entry_v1 *skcomp,
  2330. volatile struct fit_comp_error_info *skerr)
  2331. {
  2332. u32 req_id = 0;
  2333. u32 req_table;
  2334. u32 req_slot;
  2335. struct skd_special_context *skspcl;
  2336. req_id = skcomp->tag;
  2337. req_table = req_id & SKD_ID_TABLE_MASK;
  2338. req_slot = req_id & SKD_ID_SLOT_MASK;
  2339. pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
  2340. skdev->name, __func__, __LINE__,
  2341. req_table, req_id, req_slot);
  2342. /*
  2343. * Based on the request id, determine how to dispatch this completion.
  2344. * This switch/case finds the good cases and forwards the
  2345. * completion entry. Errors are reported below the switch.
  2346. */
  2347. switch (req_table) {
  2348. case SKD_ID_RW_REQUEST:
  2349. /*
  2350. * The caller, skd_completion_posted_isr() above,
  2351. * handles r/w requests. The only way we get here
  2352. * is if the req_slot is out of bounds.
  2353. */
  2354. break;
  2355. case SKD_ID_SPECIAL_REQUEST:
  2356. /*
  2357. * Make sure the req_slot is in bounds and that the id
  2358. * matches.
  2359. */
  2360. if (req_slot < skdev->n_special) {
  2361. skspcl = &skdev->skspcl_table[req_slot];
  2362. if (skspcl->req.id == req_id &&
  2363. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2364. skd_complete_special(skdev,
  2365. skcomp, skerr, skspcl);
  2366. return;
  2367. }
  2368. }
  2369. break;
  2370. case SKD_ID_INTERNAL:
  2371. if (req_slot == 0) {
  2372. skspcl = &skdev->internal_skspcl;
  2373. if (skspcl->req.id == req_id &&
  2374. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2375. skd_complete_internal(skdev,
  2376. skcomp, skerr, skspcl);
  2377. return;
  2378. }
  2379. }
  2380. break;
  2381. case SKD_ID_FIT_MSG:
  2382. /*
  2383. * These id's should never appear in a completion record.
  2384. */
  2385. break;
  2386. default:
  2387. /*
  2388. * These id's should never appear anywhere.
  2389. */
  2390. break;
  2391. }
  2392. /*
  2393. * If we get here it is a bad or stale id.
  2394. */
  2395. }
  2396. static void skd_complete_special(struct skd_device *skdev,
  2397. volatile struct fit_completion_entry_v1
  2398. *skcomp,
  2399. volatile struct fit_comp_error_info *skerr,
  2400. struct skd_special_context *skspcl)
  2401. {
  2402. pr_debug("%s:%s:%d completing special request %p\n",
  2403. skdev->name, __func__, __LINE__, skspcl);
  2404. if (skspcl->orphaned) {
  2405. /* Discard orphaned request */
  2406. /* ?: Can this release directly or does it need
  2407. * to use a worker? */
  2408. pr_debug("%s:%s:%d release orphaned %p\n",
  2409. skdev->name, __func__, __LINE__, skspcl);
  2410. skd_release_special(skdev, skspcl);
  2411. return;
  2412. }
  2413. skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
  2414. skspcl->req.state = SKD_REQ_STATE_COMPLETED;
  2415. skspcl->req.completion = *skcomp;
  2416. skspcl->req.err_info = *skerr;
  2417. skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
  2418. skerr->code, skerr->qual, skerr->fruc);
  2419. wake_up_interruptible(&skdev->waitq);
  2420. }
  2421. /* assume spinlock is already held */
  2422. static void skd_release_special(struct skd_device *skdev,
  2423. struct skd_special_context *skspcl)
  2424. {
  2425. int i, was_depleted;
  2426. for (i = 0; i < skspcl->req.n_sg; i++) {
  2427. struct page *page = sg_page(&skspcl->req.sg[i]);
  2428. __free_page(page);
  2429. }
  2430. was_depleted = (skdev->skspcl_free_list == NULL);
  2431. skspcl->req.state = SKD_REQ_STATE_IDLE;
  2432. skspcl->req.id += SKD_ID_INCR;
  2433. skspcl->req.next =
  2434. (struct skd_request_context *)skdev->skspcl_free_list;
  2435. skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
  2436. if (was_depleted) {
  2437. pr_debug("%s:%s:%d skspcl was depleted\n",
  2438. skdev->name, __func__, __LINE__);
  2439. /* Free list was depleted. There might be waiters. */
  2440. wake_up_interruptible(&skdev->waitq);
  2441. }
  2442. }
  2443. static void skd_reset_skcomp(struct skd_device *skdev)
  2444. {
  2445. u32 nbytes;
  2446. struct fit_completion_entry_v1 *skcomp;
  2447. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  2448. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  2449. memset(skdev->skcomp_table, 0, nbytes);
  2450. skdev->skcomp_ix = 0;
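/* The table was just zeroed, so expecting cycle 1 ensures no stale entry looks new. */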
  2451. skdev->skcomp_cycle = 1;
  2452. }
  2453. /*
  2454. *****************************************************************************
  2455. * INTERRUPTS
  2456. *****************************************************************************
  2457. */
  2458. static void skd_completion_worker(struct work_struct *work)
  2459. {
  2460. struct skd_device *skdev =
  2461. container_of(work, struct skd_device, completion_worker);
  2462. unsigned long flags;
  2463. int flush_enqueued = 0;
  2464. spin_lock_irqsave(&skdev->lock, flags);
  2465. /*
  2466. * pass in limit=0, which means no limit..
  2467. * process everything in compq
  2468. */
  2469. skd_isr_completion_posted(skdev, 0, &flush_enqueued);
  2470. skd_request_fn(skdev->queue);
  2471. spin_unlock_irqrestore(&skdev->lock, flags);
  2472. }
  2473. static void skd_isr_msg_from_dev(struct skd_device *skdev);
  2474. static irqreturn_t
  2475. skd_isr(int irq, void *ptr)
  2476. {
  2477. struct skd_device *skdev;
  2478. u32 intstat;
  2479. u32 ack;
  2480. int rc = 0;
  2481. int deferred = 0;
  2482. int flush_enqueued = 0;
  2483. skdev = (struct skd_device *)ptr;
  2484. spin_lock(&skdev->lock);
  2485. for (;; ) {
  2486. intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2487. ack = FIT_INT_DEF_MASK;
  2488. ack &= intstat;
  2489. pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
  2490. skdev->name, __func__, __LINE__, intstat, ack);
  2491. /* As long as there is an int pending on device, keep
  2492. * running loop. When none, get out, but if we've never
  2493. * done any processing, call completion handler?
  2494. */
  2495. if (ack == 0) {
  2496. /* No interrupts on device, but run the completion
  2497. * processor anyway?
  2498. */
  2499. if (rc == 0)
  2500. if (likely (skdev->state
  2501. == SKD_DRVR_STATE_ONLINE))
  2502. deferred = 1;
  2503. break;
  2504. }
  2505. rc = IRQ_HANDLED;
  2506. SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
  2507. if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
  2508. (skdev->state != SKD_DRVR_STATE_STOPPING))) {
  2509. if (intstat & FIT_ISH_COMPLETION_POSTED) {
  2510. /*
  2511. * If we have already deferred completion
  2512. * processing, don't bother running it again
  2513. */
  2514. if (deferred == 0)
  2515. deferred =
  2516. skd_isr_completion_posted(skdev,
  2517. skd_isr_comp_limit, &flush_enqueued);
  2518. }
  2519. if (intstat & FIT_ISH_FW_STATE_CHANGE) {
  2520. skd_isr_fwstate(skdev);
  2521. if (skdev->state == SKD_DRVR_STATE_FAULT ||
  2522. skdev->state ==
  2523. SKD_DRVR_STATE_DISAPPEARED) {
  2524. spin_unlock(&skdev->lock);
  2525. return rc;
  2526. }
  2527. }
  2528. if (intstat & FIT_ISH_MSG_FROM_DEV)
  2529. skd_isr_msg_from_dev(skdev);
  2530. }
  2531. }
  2532. if (unlikely(flush_enqueued))
  2533. skd_request_fn(skdev->queue);
  2534. if (deferred)
  2535. schedule_work(&skdev->completion_worker);
  2536. else if (!flush_enqueued)
  2537. skd_request_fn(skdev->queue);
  2538. spin_unlock(&skdev->lock);
  2539. return rc;
  2540. }
  2541. static void skd_drive_fault(struct skd_device *skdev)
  2542. {
  2543. skdev->state = SKD_DRVR_STATE_FAULT;
  2544. pr_err("(%s): Drive FAULT\n", skd_name(skdev));
  2545. }
  2546. static void skd_drive_disappeared(struct skd_device *skdev)
  2547. {
  2548. skdev->state = SKD_DRVR_STATE_DISAPPEARED;
  2549. pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
  2550. }
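/*
 * Handle a firmware state-change interrupt: read the drive state from
 * FIT_STATUS and move the driver state machine accordingly (online
 * setup, busy/erase/sanitize timers, soft reset, fault or disappeared
 * recovery). Called with skdev->lock held.
 */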
  2551. static void skd_isr_fwstate(struct skd_device *skdev)
  2552. {
  2553. u32 sense;
  2554. u32 state;
  2555. u32 mtd;
  2556. int prev_driver_state = skdev->state;
  2557. sense = SKD_READL(skdev, FIT_STATUS);
  2558. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2559. pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
  2560. skd_name(skdev),
  2561. skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
  2562. skd_drive_state_to_str(state), state);
  2563. skdev->drive_state = state;
  2564. switch (skdev->drive_state) {
  2565. case FIT_SR_DRIVE_INIT:
  2566. if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
  2567. skd_disable_interrupts(skdev);
  2568. break;
  2569. }
  2570. if (skdev->state == SKD_DRVR_STATE_RESTARTING)
  2571. skd_recover_requests(skdev, 0);
  2572. if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
  2573. skdev->timer_countdown = SKD_STARTING_TIMO;
  2574. skdev->state = SKD_DRVR_STATE_STARTING;
  2575. skd_soft_reset(skdev);
  2576. break;
  2577. }
  2578. mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
  2579. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2580. skdev->last_mtd = mtd;
  2581. break;
  2582. case FIT_SR_DRIVE_ONLINE:
  2583. skdev->cur_max_queue_depth = skd_max_queue_depth;
  2584. if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
  2585. skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
  2586. skdev->queue_low_water_mark =
  2587. skdev->cur_max_queue_depth * 2 / 3 + 1;
  2588. if (skdev->queue_low_water_mark < 1)
  2589. skdev->queue_low_water_mark = 1;
  2590. pr_info(
  2591. "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
  2592. skd_name(skdev),
  2593. skdev->cur_max_queue_depth,
  2594. skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
  2595. skd_refresh_device_data(skdev);
  2596. break;
  2597. case FIT_SR_DRIVE_BUSY:
  2598. skdev->state = SKD_DRVR_STATE_BUSY;
  2599. skdev->timer_countdown = SKD_BUSY_TIMO;
  2600. skd_quiesce_dev(skdev);
  2601. break;
  2602. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2603. /* set timer for 3 seconds, we'll abort any unfinished
  2604. * commands after that expires
  2605. */
  2606. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2607. skdev->timer_countdown = SKD_TIMER_SECONDS(3);
  2608. blk_start_queue(skdev->queue);
  2609. break;
  2610. case FIT_SR_DRIVE_BUSY_ERASE:
  2611. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2612. skdev->timer_countdown = SKD_BUSY_TIMO;
  2613. break;
  2614. case FIT_SR_DRIVE_OFFLINE:
  2615. skdev->state = SKD_DRVR_STATE_IDLE;
  2616. break;
  2617. case FIT_SR_DRIVE_SOFT_RESET:
  2618. switch (skdev->state) {
  2619. case SKD_DRVR_STATE_STARTING:
  2620. case SKD_DRVR_STATE_RESTARTING:
  2621. /* Expected by a caller of skd_soft_reset() */
  2622. break;
  2623. default:
  2624. skdev->state = SKD_DRVR_STATE_RESTARTING;
  2625. break;
  2626. }
  2627. break;
  2628. case FIT_SR_DRIVE_FW_BOOTING:
  2629. pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
  2630. skdev->name, __func__, __LINE__, skdev->name);
  2631. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2632. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2633. break;
  2634. case FIT_SR_DRIVE_DEGRADED:
  2635. case FIT_SR_PCIE_LINK_DOWN:
  2636. case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
  2637. break;
  2638. case FIT_SR_DRIVE_FAULT:
  2639. skd_drive_fault(skdev);
  2640. skd_recover_requests(skdev, 0);
  2641. blk_start_queue(skdev->queue);
  2642. break;
  2643. /* PCIe bus returned all Fs? */
  2644. case 0xFF:
  2645. pr_info("(%s): state=0x%x sense=0x%x\n",
  2646. skd_name(skdev), state, sense);
  2647. skd_drive_disappeared(skdev);
  2648. skd_recover_requests(skdev, 0);
  2649. blk_start_queue(skdev->queue);
  2650. break;
  2651. default:
                /*
                 * Unknown FW state. Wait for a state we recognize.
                 */
  2655. break;
  2656. }
  2657. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  2658. skd_name(skdev),
  2659. skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
  2660. skd_skdev_state_to_str(skdev->state), skdev->state);
  2661. }
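/*
 * Reclaim all outstanding work after a reset, fault or disappearance.
 * Busy r/w requests are either requeued (while their retry count stays
 * below SKD_MAX_RETRIES) or failed with -EIO, busy FIT messages are
 * returned to idle, and busy special requests are released or marked
 * aborted. The free lists, timeout slots and in-flight count are then
 * rebuilt from scratch.
 */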
  2662. static void skd_recover_requests(struct skd_device *skdev, int requeue)
  2663. {
  2664. int i;
  2665. for (i = 0; i < skdev->num_req_context; i++) {
  2666. struct skd_request_context *skreq = &skdev->skreq_table[i];
  2667. if (skreq->state == SKD_REQ_STATE_BUSY) {
  2668. skd_log_skreq(skdev, skreq, "recover");
  2669. SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
  2670. SKD_ASSERT(skreq->req != NULL);
  2671. /* Release DMA resources for the request. */
  2672. if (skreq->n_sg > 0)
  2673. skd_postop_sg_list(skdev, skreq);
  2674. if (requeue &&
  2675. (unsigned long) ++skreq->req->special <
  2676. SKD_MAX_RETRIES)
  2677. blk_requeue_request(skdev->queue, skreq->req);
  2678. else
  2679. skd_end_request(skdev, skreq, -EIO);
  2680. skreq->req = NULL;
  2681. skreq->state = SKD_REQ_STATE_IDLE;
  2682. skreq->id += SKD_ID_INCR;
  2683. }
  2684. if (i > 0)
  2685. skreq[-1].next = skreq;
  2686. skreq->next = NULL;
  2687. }
  2688. skdev->skreq_free_list = skdev->skreq_table;
  2689. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2690. struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
  2691. if (skmsg->state == SKD_MSG_STATE_BUSY) {
  2692. skd_log_skmsg(skdev, skmsg, "salvaged");
  2693. SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
  2694. skmsg->state = SKD_MSG_STATE_IDLE;
  2695. skmsg->id += SKD_ID_INCR;
  2696. }
  2697. if (i > 0)
  2698. skmsg[-1].next = skmsg;
  2699. skmsg->next = NULL;
  2700. }
  2701. skdev->skmsg_free_list = skdev->skmsg_table;
  2702. for (i = 0; i < skdev->n_special; i++) {
  2703. struct skd_special_context *skspcl = &skdev->skspcl_table[i];
                /* If orphaned, reclaim it because it has already been
                 * reported to the process as an error (it was just waiting
                 * for a completion that didn't come, and now it never will).
                 * If busy, change to a state that will cause it to error
                 * out in the wait routine and let it do the normal
                 * reporting and reclaiming.
                 */
  2711. if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2712. if (skspcl->orphaned) {
  2713. pr_debug("%s:%s:%d orphaned %p\n",
  2714. skdev->name, __func__, __LINE__,
  2715. skspcl);
  2716. skd_release_special(skdev, skspcl);
  2717. } else {
  2718. pr_debug("%s:%s:%d not orphaned %p\n",
  2719. skdev->name, __func__, __LINE__,
  2720. skspcl);
  2721. skspcl->req.state = SKD_REQ_STATE_ABORTED;
  2722. }
  2723. }
  2724. }
  2725. skdev->skspcl_free_list = skdev->skspcl_table;
  2726. for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
  2727. skdev->timeout_slot[i] = 0;
  2728. skdev->in_flight = 0;
  2729. }
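/*
 * Handle a message-from-device interrupt. Each acknowledged message
 * drives the next step of the FIT init handshake: protocol version
 * check, command/completion queue depth, completion queue address,
 * host id and time-stamp exchange, and finally FIT_MTD_ARM_QUEUE.
 * Acks that do not match the last message we sent are ignored.
 */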
  2730. static void skd_isr_msg_from_dev(struct skd_device *skdev)
  2731. {
  2732. u32 mfd;
  2733. u32 mtd;
  2734. u32 data;
  2735. mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2736. pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
  2737. skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
  2738. /* ignore any mtd that is an ack for something we didn't send */
  2739. if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
  2740. return;
  2741. switch (FIT_MXD_TYPE(mfd)) {
  2742. case FIT_MTD_FITFW_INIT:
  2743. skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
  2744. if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
  2745. pr_err("(%s): protocol mismatch\n",
  2746. skdev->name);
  2747. pr_err("(%s): got=%d support=%d\n",
  2748. skdev->name, skdev->proto_ver,
  2749. FIT_PROTOCOL_VERSION_1);
  2750. pr_err("(%s): please upgrade driver\n",
  2751. skdev->name);
  2752. skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
  2753. skd_soft_reset(skdev);
  2754. break;
  2755. }
  2756. mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
  2757. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2758. skdev->last_mtd = mtd;
  2759. break;
  2760. case FIT_MTD_GET_CMDQ_DEPTH:
  2761. skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
  2762. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
  2763. SKD_N_COMPLETION_ENTRY);
  2764. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2765. skdev->last_mtd = mtd;
  2766. break;
  2767. case FIT_MTD_SET_COMPQ_DEPTH:
  2768. SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
  2769. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
  2770. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2771. skdev->last_mtd = mtd;
  2772. break;
  2773. case FIT_MTD_SET_COMPQ_ADDR:
  2774. skd_reset_skcomp(skdev);
  2775. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
  2776. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2777. skdev->last_mtd = mtd;
  2778. break;
  2779. case FIT_MTD_CMD_LOG_HOST_ID:
  2780. skdev->connect_time_stamp = get_seconds();
  2781. data = skdev->connect_time_stamp & 0xFFFF;
  2782. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
  2783. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2784. skdev->last_mtd = mtd;
  2785. break;
  2786. case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
  2787. skdev->drive_jiffies = FIT_MXD_DATA(mfd);
  2788. data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
  2789. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
  2790. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2791. skdev->last_mtd = mtd;
  2792. break;
  2793. case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
  2794. skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
  2795. mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
  2796. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2797. skdev->last_mtd = mtd;
  2798. pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
  2799. skd_name(skdev),
  2800. skdev->connect_time_stamp, skdev->drive_jiffies);
  2801. break;
  2802. case FIT_MTD_ARM_QUEUE:
  2803. skdev->last_mtd = 0;
  2804. /*
  2805. * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
  2806. */
  2807. break;
  2808. default:
  2809. break;
  2810. }
  2811. }
  2812. static void skd_disable_interrupts(struct skd_device *skdev)
  2813. {
  2814. u32 sense;
  2815. sense = SKD_READL(skdev, FIT_CONTROL);
  2816. sense &= ~FIT_CR_ENABLE_INTERRUPTS;
  2817. SKD_WRITEL(skdev, sense, FIT_CONTROL);
  2818. pr_debug("%s:%s:%d sense 0x%x\n",
  2819. skdev->name, __func__, __LINE__, sense);
        /* Note that all ones are written. A 1 bit means
         * disable, a 0 means enable.
         */
  2823. SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
  2824. }
  2825. static void skd_enable_interrupts(struct skd_device *skdev)
  2826. {
  2827. u32 val;
  2828. /* unmask interrupts first */
  2829. val = FIT_ISH_FW_STATE_CHANGE +
  2830. FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
        /* Note that the complement of the mask is written. A 1 bit means
         * disable, a 0 means enable.
         */
  2833. SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
  2834. pr_debug("%s:%s:%d interrupt mask=0x%x\n",
  2835. skdev->name, __func__, __LINE__, ~val);
  2836. val = SKD_READL(skdev, FIT_CONTROL);
  2837. val |= FIT_CR_ENABLE_INTERRUPTS;
  2838. pr_debug("%s:%s:%d control=0x%x\n",
  2839. skdev->name, __func__, __LINE__, val);
  2840. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2841. }
  2842. /*
  2843. *****************************************************************************
  2844. * START, STOP, RESTART, QUIESCE, UNQUIESCE
  2845. *****************************************************************************
  2846. */
  2847. static void skd_soft_reset(struct skd_device *skdev)
  2848. {
  2849. u32 val;
  2850. val = SKD_READL(skdev, FIT_CONTROL);
  2851. val |= (FIT_CR_SOFT_RESET);
  2852. pr_debug("%s:%s:%d control=0x%x\n",
  2853. skdev->name, __func__, __LINE__, val);
  2854. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2855. }
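/*
 * Bring the device up at probe time: ack any stale interrupts, sample
 * the initial drive state, enable interrupts and then either wait for
 * firmware boot, wait out a busy/erase/sanitize phase, issue a soft
 * reset, or fail the device if it is faulted or not responding.
 */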
  2856. static void skd_start_device(struct skd_device *skdev)
  2857. {
  2858. unsigned long flags;
  2859. u32 sense;
  2860. u32 state;
  2861. spin_lock_irqsave(&skdev->lock, flags);
  2862. /* ack all ghost interrupts */
  2863. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2864. sense = SKD_READL(skdev, FIT_STATUS);
  2865. pr_debug("%s:%s:%d initial status=0x%x\n",
  2866. skdev->name, __func__, __LINE__, sense);
  2867. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2868. skdev->drive_state = state;
  2869. skdev->last_mtd = 0;
  2870. skdev->state = SKD_DRVR_STATE_STARTING;
  2871. skdev->timer_countdown = SKD_STARTING_TIMO;
  2872. skd_enable_interrupts(skdev);
  2873. switch (skdev->drive_state) {
  2874. case FIT_SR_DRIVE_OFFLINE:
  2875. pr_err("(%s): Drive offline...\n", skd_name(skdev));
  2876. break;
  2877. case FIT_SR_DRIVE_FW_BOOTING:
  2878. pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
  2879. skdev->name, __func__, __LINE__, skdev->name);
  2880. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2881. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2882. break;
  2883. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2884. pr_info("(%s): Start: BUSY_SANITIZE\n",
  2885. skd_name(skdev));
  2886. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2887. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2888. break;
  2889. case FIT_SR_DRIVE_BUSY_ERASE:
  2890. pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
  2891. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2892. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2893. break;
  2894. case FIT_SR_DRIVE_INIT:
  2895. case FIT_SR_DRIVE_ONLINE:
  2896. skd_soft_reset(skdev);
  2897. break;
  2898. case FIT_SR_DRIVE_BUSY:
  2899. pr_err("(%s): Drive Busy...\n", skd_name(skdev));
  2900. skdev->state = SKD_DRVR_STATE_BUSY;
  2901. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2902. break;
  2903. case FIT_SR_DRIVE_SOFT_RESET:
  2904. pr_err("(%s) drive soft reset in prog\n",
  2905. skd_name(skdev));
  2906. break;
  2907. case FIT_SR_DRIVE_FAULT:
  2908. /* Fault state is bad...soft reset won't do it...
  2909. * Hard reset, maybe, but does it work on device?
  2910. * For now, just fault so the system doesn't hang.
  2911. */
  2912. skd_drive_fault(skdev);
                /* start the queue so we can respond with errors to requests */
  2914. pr_debug("%s:%s:%d starting %s queue\n",
  2915. skdev->name, __func__, __LINE__, skdev->name);
  2916. blk_start_queue(skdev->queue);
  2917. skdev->gendisk_on = -1;
  2918. wake_up_interruptible(&skdev->waitq);
  2919. break;
  2920. case 0xFF:
  2921. /* Most likely the device isn't there or isn't responding
  2922. * to the BAR1 addresses. */
  2923. skd_drive_disappeared(skdev);
                /* start the queue so we can respond with errors to requests */
  2925. pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
  2926. skdev->name, __func__, __LINE__, skdev->name);
  2927. blk_start_queue(skdev->queue);
  2928. skdev->gendisk_on = -1;
  2929. wake_up_interruptible(&skdev->waitq);
  2930. break;
  2931. default:
  2932. pr_err("(%s) Start: unknown state %x\n",
  2933. skd_name(skdev), skdev->drive_state);
  2934. break;
  2935. }
  2936. state = SKD_READL(skdev, FIT_CONTROL);
  2937. pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
  2938. skdev->name, __func__, __LINE__, state);
  2939. state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2940. pr_debug("%s:%s:%d Intr Status=0x%x\n",
  2941. skdev->name, __func__, __LINE__, state);
  2942. state = SKD_READL(skdev, FIT_INT_MASK_HOST);
  2943. pr_debug("%s:%s:%d Intr Mask=0x%x\n",
  2944. skdev->name, __func__, __LINE__, state);
  2945. state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2946. pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
  2947. skdev->name, __func__, __LINE__, state);
  2948. state = SKD_READL(skdev, FIT_HW_VERSION);
  2949. pr_debug("%s:%s:%d HW version=0x%x\n",
  2950. skdev->name, __func__, __LINE__, state);
  2951. spin_unlock_irqrestore(&skdev->lock, flags);
  2952. }
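/*
 * Quiesce and shut the device down: flush the write cache with an
 * internal SYNCHRONIZE_CACHE request (waiting up to 10 seconds), stop
 * the timer, disable interrupts and soft-reset the drive, then poll
 * for up to one second for it to return to the INIT state.
 */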
  2953. static void skd_stop_device(struct skd_device *skdev)
  2954. {
  2955. unsigned long flags;
  2956. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  2957. u32 dev_state;
  2958. int i;
  2959. spin_lock_irqsave(&skdev->lock, flags);
  2960. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  2961. pr_err("(%s): skd_stop_device not online no sync\n",
  2962. skd_name(skdev));
  2963. goto stop_out;
  2964. }
  2965. if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
  2966. pr_err("(%s): skd_stop_device no special\n",
  2967. skd_name(skdev));
  2968. goto stop_out;
  2969. }
  2970. skdev->state = SKD_DRVR_STATE_SYNCING;
  2971. skdev->sync_done = 0;
  2972. skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
  2973. spin_unlock_irqrestore(&skdev->lock, flags);
  2974. wait_event_interruptible_timeout(skdev->waitq,
  2975. (skdev->sync_done), (10 * HZ));
  2976. spin_lock_irqsave(&skdev->lock, flags);
  2977. switch (skdev->sync_done) {
  2978. case 0:
  2979. pr_err("(%s): skd_stop_device no sync\n",
  2980. skd_name(skdev));
  2981. break;
  2982. case 1:
  2983. pr_err("(%s): skd_stop_device sync done\n",
  2984. skd_name(skdev));
  2985. break;
  2986. default:
  2987. pr_err("(%s): skd_stop_device sync error\n",
  2988. skd_name(skdev));
  2989. }
  2990. stop_out:
  2991. skdev->state = SKD_DRVR_STATE_STOPPING;
  2992. spin_unlock_irqrestore(&skdev->lock, flags);
  2993. skd_kill_timer(skdev);
  2994. spin_lock_irqsave(&skdev->lock, flags);
  2995. skd_disable_interrupts(skdev);
  2996. /* ensure all ints on device are cleared */
  2997. /* soft reset the device to unload with a clean slate */
  2998. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2999. SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
  3000. spin_unlock_irqrestore(&skdev->lock, flags);
  3001. /* poll every 100ms, 1 second timeout */
  3002. for (i = 0; i < 10; i++) {
  3003. dev_state =
  3004. SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
  3005. if (dev_state == FIT_SR_DRIVE_INIT)
  3006. break;
  3007. set_current_state(TASK_INTERRUPTIBLE);
  3008. schedule_timeout(msecs_to_jiffies(100));
  3009. }
  3010. if (dev_state != FIT_SR_DRIVE_INIT)
  3011. pr_err("(%s): skd_stop_device state error 0x%02x\n",
  3012. skd_name(skdev), dev_state);
  3013. }
  3014. /* assume spinlock is held */
  3015. static void skd_restart_device(struct skd_device *skdev)
  3016. {
  3017. u32 state;
  3018. /* ack all ghost interrupts */
  3019. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3020. state = SKD_READL(skdev, FIT_STATUS);
  3021. pr_debug("%s:%s:%d drive status=0x%x\n",
  3022. skdev->name, __func__, __LINE__, state);
  3023. state &= FIT_SR_DRIVE_STATE_MASK;
  3024. skdev->drive_state = state;
  3025. skdev->last_mtd = 0;
  3026. skdev->state = SKD_DRVR_STATE_RESTARTING;
  3027. skdev->timer_countdown = SKD_RESTARTING_TIMO;
  3028. skd_soft_reset(skdev);
  3029. }
  3030. /* assume spinlock is held */
  3031. static int skd_quiesce_dev(struct skd_device *skdev)
  3032. {
  3033. int rc = 0;
  3034. switch (skdev->state) {
  3035. case SKD_DRVR_STATE_BUSY:
  3036. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3037. pr_debug("%s:%s:%d stopping %s queue\n",
  3038. skdev->name, __func__, __LINE__, skdev->name);
  3039. blk_stop_queue(skdev->queue);
  3040. break;
  3041. case SKD_DRVR_STATE_ONLINE:
  3042. case SKD_DRVR_STATE_STOPPING:
  3043. case SKD_DRVR_STATE_SYNCING:
  3044. case SKD_DRVR_STATE_PAUSING:
  3045. case SKD_DRVR_STATE_PAUSED:
  3046. case SKD_DRVR_STATE_STARTING:
  3047. case SKD_DRVR_STATE_RESTARTING:
  3048. case SKD_DRVR_STATE_RESUMING:
  3049. default:
  3050. rc = -EINVAL;
  3051. pr_debug("%s:%s:%d state [%d] not implemented\n",
  3052. skdev->name, __func__, __LINE__, skdev->state);
  3053. }
  3054. return rc;
  3055. }
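/*
 * Resume normal operation once the drive reports ONLINE: restart the
 * block queue and wake any waiters. If the drive is not ONLINE yet,
 * park the driver in the BUSY state and wait for the next firmware
 * state change.
 */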
  3056. /* assume spinlock is held */
  3057. static int skd_unquiesce_dev(struct skd_device *skdev)
  3058. {
  3059. int prev_driver_state = skdev->state;
  3060. skd_log_skdev(skdev, "unquiesce");
  3061. if (skdev->state == SKD_DRVR_STATE_ONLINE) {
  3062. pr_debug("%s:%s:%d **** device already ONLINE\n",
  3063. skdev->name, __func__, __LINE__);
  3064. return 0;
  3065. }
  3066. if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
                /*
                 * If there has been a state change to other than
                 * ONLINE, we will rely on the controller state change
                 * to come back online and restart the queue.
                 * The BUSY state means that the driver is ready to
                 * continue normal processing but is waiting for the
                 * controller to become available.
                 */
  3075. skdev->state = SKD_DRVR_STATE_BUSY;
  3076. pr_debug("%s:%s:%d drive BUSY state\n",
  3077. skdev->name, __func__, __LINE__);
  3078. return 0;
  3079. }
        /*
         * The drive has just come online. The driver is either in
         * startup, paused performing a task, or busy waiting for
         * hardware.
         */
  3084. switch (skdev->state) {
  3085. case SKD_DRVR_STATE_PAUSED:
  3086. case SKD_DRVR_STATE_BUSY:
  3087. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3088. case SKD_DRVR_STATE_BUSY_ERASE:
  3089. case SKD_DRVR_STATE_STARTING:
  3090. case SKD_DRVR_STATE_RESTARTING:
  3091. case SKD_DRVR_STATE_FAULT:
  3092. case SKD_DRVR_STATE_IDLE:
  3093. case SKD_DRVR_STATE_LOAD:
  3094. skdev->state = SKD_DRVR_STATE_ONLINE;
  3095. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  3096. skd_name(skdev),
  3097. skd_skdev_state_to_str(prev_driver_state),
  3098. prev_driver_state, skd_skdev_state_to_str(skdev->state),
  3099. skdev->state);
  3100. pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
  3101. skdev->name, __func__, __LINE__);
  3102. pr_debug("%s:%s:%d starting %s queue\n",
  3103. skdev->name, __func__, __LINE__, skdev->name);
  3104. pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
  3105. blk_start_queue(skdev->queue);
  3106. skdev->gendisk_on = 1;
  3107. wake_up_interruptible(&skdev->waitq);
  3108. break;
  3109. case SKD_DRVR_STATE_DISAPPEARED:
  3110. default:
                pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
                         skdev->name, __func__, __LINE__, skdev->state);
  3114. return -EBUSY;
  3115. }
  3116. return 0;
  3117. }
  3118. /*
  3119. *****************************************************************************
  3120. * PCIe MSI/MSI-X INTERRUPT HANDLERS
  3121. *****************************************************************************
  3122. */
  3123. static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
  3124. {
  3125. struct skd_device *skdev = skd_host_data;
  3126. unsigned long flags;
  3127. spin_lock_irqsave(&skdev->lock, flags);
  3128. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3129. skdev->name, __func__, __LINE__,
  3130. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3131. pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
  3132. irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3133. SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
  3134. spin_unlock_irqrestore(&skdev->lock, flags);
  3135. return IRQ_HANDLED;
  3136. }
  3137. static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
  3138. {
  3139. struct skd_device *skdev = skd_host_data;
  3140. unsigned long flags;
  3141. spin_lock_irqsave(&skdev->lock, flags);
  3142. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3143. skdev->name, __func__, __LINE__,
  3144. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3145. SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
  3146. skd_isr_fwstate(skdev);
  3147. spin_unlock_irqrestore(&skdev->lock, flags);
  3148. return IRQ_HANDLED;
  3149. }
  3150. static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
  3151. {
  3152. struct skd_device *skdev = skd_host_data;
  3153. unsigned long flags;
  3154. int flush_enqueued = 0;
  3155. int deferred;
  3156. spin_lock_irqsave(&skdev->lock, flags);
  3157. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3158. skdev->name, __func__, __LINE__,
  3159. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3160. SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
  3161. deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
  3162. &flush_enqueued);
  3163. if (flush_enqueued)
  3164. skd_request_fn(skdev->queue);
  3165. if (deferred)
  3166. schedule_work(&skdev->completion_worker);
  3167. else if (!flush_enqueued)
  3168. skd_request_fn(skdev->queue);
  3169. spin_unlock_irqrestore(&skdev->lock, flags);
  3170. return IRQ_HANDLED;
  3171. }
  3172. static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
  3173. {
  3174. struct skd_device *skdev = skd_host_data;
  3175. unsigned long flags;
  3176. spin_lock_irqsave(&skdev->lock, flags);
  3177. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3178. skdev->name, __func__, __LINE__,
  3179. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3180. SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
  3181. skd_isr_msg_from_dev(skdev);
  3182. spin_unlock_irqrestore(&skdev->lock, flags);
  3183. return IRQ_HANDLED;
  3184. }
  3185. static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  3186. {
  3187. struct skd_device *skdev = skd_host_data;
  3188. unsigned long flags;
  3189. spin_lock_irqsave(&skdev->lock, flags);
  3190. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3191. skdev->name, __func__, __LINE__,
  3192. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3193. SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
  3194. spin_unlock_irqrestore(&skdev->lock, flags);
  3195. return IRQ_HANDLED;
  3196. }
  3197. /*
  3198. *****************************************************************************
  3199. * PCIe MSI/MSI-X SETUP
  3200. *****************************************************************************
  3201. */
  3202. struct skd_msix_entry {
  3203. int have_irq;
  3204. u32 vector;
  3205. u32 entry;
  3206. struct skd_device *rsp;
  3207. char isr_name[30];
  3208. };
  3209. struct skd_init_msix_entry {
  3210. const char *name;
  3211. irq_handler_t handler;
  3212. };
  3213. #define SKD_MAX_MSIX_COUNT 13
  3214. #define SKD_MIN_MSIX_COUNT 7
  3215. #define SKD_BASE_MSIX_IRQ 4
  3216. static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
  3217. { "(DMA 0)", skd_reserved_isr },
  3218. { "(DMA 1)", skd_reserved_isr },
  3219. { "(DMA 2)", skd_reserved_isr },
  3220. { "(DMA 3)", skd_reserved_isr },
  3221. { "(State Change)", skd_statec_isr },
  3222. { "(COMPL_Q)", skd_comp_q },
  3223. { "(MSG)", skd_msg_isr },
  3224. { "(Reserved)", skd_reserved_isr },
  3225. { "(Reserved)", skd_reserved_isr },
  3226. { "(Queue Full 0)", skd_qfull_isr },
  3227. { "(Queue Full 1)", skd_qfull_isr },
  3228. { "(Queue Full 2)", skd_qfull_isr },
  3229. { "(Queue Full 3)", skd_qfull_isr },
  3230. };
  3231. static void skd_release_msix(struct skd_device *skdev)
  3232. {
  3233. struct skd_msix_entry *qentry;
  3234. int i;
  3235. if (skdev->msix_entries) {
  3236. for (i = 0; i < skdev->msix_count; i++) {
  3237. qentry = &skdev->msix_entries[i];
  3238. skdev = qentry->rsp;
  3239. if (qentry->have_irq)
  3240. devm_free_irq(&skdev->pdev->dev,
  3241. qentry->vector, qentry->rsp);
  3242. }
  3243. kfree(skdev->msix_entries);
  3244. }
  3245. if (skdev->msix_count)
  3246. pci_disable_msix(skdev->pdev);
  3247. skdev->msix_count = 0;
  3248. skdev->msix_entries = NULL;
  3249. }
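/*
 * Allocate and wire up the full MSI-X vector set: enable exactly
 * SKD_MAX_MSIX_COUNT vectors and register the per-vector handlers from
 * msix_entries[]. Any failure releases everything via skd_release_msix
 * so the caller can fall back to MSI.
 */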
  3250. static int skd_acquire_msix(struct skd_device *skdev)
  3251. {
  3252. int i, rc;
  3253. struct pci_dev *pdev = skdev->pdev;
  3254. struct msix_entry *entries;
  3255. struct skd_msix_entry *qentry;
  3256. entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
  3257. GFP_KERNEL);
  3258. if (!entries)
  3259. return -ENOMEM;
  3260. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
  3261. entries[i].entry = i;
  3262. rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
  3263. if (rc) {
  3264. pr_err("(%s): failed to enable MSI-X %d\n",
  3265. skd_name(skdev), rc);
  3266. goto msix_out;
  3267. }
  3268. skdev->msix_count = SKD_MAX_MSIX_COUNT;
  3269. skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
  3270. skdev->msix_count, GFP_KERNEL);
  3271. if (!skdev->msix_entries) {
  3272. rc = -ENOMEM;
  3273. pr_err("(%s): msix table allocation error\n",
  3274. skd_name(skdev));
  3275. goto msix_out;
  3276. }
  3277. for (i = 0; i < skdev->msix_count; i++) {
  3278. qentry = &skdev->msix_entries[i];
  3279. qentry->vector = entries[i].vector;
  3280. qentry->entry = entries[i].entry;
  3281. qentry->rsp = NULL;
  3282. qentry->have_irq = 0;
  3283. pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
  3284. skdev->name, __func__, __LINE__,
  3285. pci_name(pdev), skdev->name,
  3286. i, qentry->vector, qentry->entry);
  3287. }
  3288. /* Enable MSI-X vectors for the base queue */
  3289. for (i = 0; i < skdev->msix_count; i++) {
  3290. qentry = &skdev->msix_entries[i];
  3291. snprintf(qentry->isr_name, sizeof(qentry->isr_name),
  3292. "%s%d-msix %s", DRV_NAME, skdev->devno,
  3293. msix_entries[i].name);
  3294. rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
  3295. msix_entries[i].handler, 0,
  3296. qentry->isr_name, skdev);
  3297. if (rc) {
  3298. pr_err("(%s): Unable to register(%d) MSI-X "
  3299. "handler %d: %s\n",
  3300. skd_name(skdev), rc, i, qentry->isr_name);
  3301. goto msix_out;
  3302. } else {
  3303. qentry->have_irq = 1;
  3304. qentry->rsp = skdev;
  3305. }
  3306. }
  3307. pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
  3308. skdev->name, __func__, __LINE__,
  3309. pci_name(pdev), skdev->name, skdev->msix_count);
  3310. return 0;
  3311. msix_out:
        kfree(entries);
  3314. skd_release_msix(skdev);
  3315. return rc;
  3316. }
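/*
 * Acquire an interrupt using the configured irq_type, degrading from
 * MSI-X to MSI to legacy INTx as each setup attempt fails.
 */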
  3317. static int skd_acquire_irq(struct skd_device *skdev)
  3318. {
  3319. int rc;
  3320. struct pci_dev *pdev;
  3321. pdev = skdev->pdev;
  3322. skdev->msix_count = 0;
  3323. RETRY_IRQ_TYPE:
  3324. switch (skdev->irq_type) {
  3325. case SKD_IRQ_MSIX:
  3326. rc = skd_acquire_msix(skdev);
  3327. if (!rc)
  3328. pr_info("(%s): MSI-X %d irqs enabled\n",
  3329. skd_name(skdev), skdev->msix_count);
  3330. else {
  3331. pr_err(
  3332. "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
  3333. skd_name(skdev), rc);
  3334. skdev->irq_type = SKD_IRQ_MSI;
  3335. goto RETRY_IRQ_TYPE;
  3336. }
  3337. break;
  3338. case SKD_IRQ_MSI:
  3339. snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
  3340. DRV_NAME, skdev->devno);
  3341. rc = pci_enable_msi_range(pdev, 1, 1);
  3342. if (rc > 0) {
  3343. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
  3344. skdev->isr_name, skdev);
  3345. if (rc) {
  3346. pci_disable_msi(pdev);
  3347. pr_err(
  3348. "(%s): failed to allocate the MSI interrupt %d\n",
  3349. skd_name(skdev), rc);
  3350. goto RETRY_IRQ_LEGACY;
  3351. }
  3352. pr_info("(%s): MSI irq %d enabled\n",
  3353. skd_name(skdev), pdev->irq);
  3354. } else {
  3355. RETRY_IRQ_LEGACY:
  3356. pr_err(
  3357. "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
  3358. skd_name(skdev), rc);
  3359. skdev->irq_type = SKD_IRQ_LEGACY;
  3360. goto RETRY_IRQ_TYPE;
  3361. }
  3362. break;
  3363. case SKD_IRQ_LEGACY:
  3364. snprintf(skdev->isr_name, sizeof(skdev->isr_name),
  3365. "%s%d-legacy", DRV_NAME, skdev->devno);
  3366. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
  3367. IRQF_SHARED, skdev->isr_name, skdev);
  3368. if (!rc)
  3369. pr_info("(%s): LEGACY irq %d enabled\n",
  3370. skd_name(skdev), pdev->irq);
  3371. else
  3372. pr_err("(%s): request LEGACY irq error %d\n",
  3373. skd_name(skdev), rc);
  3374. break;
  3375. default:
  3376. pr_info("(%s): irq_type %d invalid, re-set to %d\n",
  3377. skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
  3378. skdev->irq_type = SKD_IRQ_LEGACY;
  3379. goto RETRY_IRQ_TYPE;
  3380. }
  3381. return rc;
  3382. }
  3383. static void skd_release_irq(struct skd_device *skdev)
  3384. {
  3385. switch (skdev->irq_type) {
  3386. case SKD_IRQ_MSIX:
  3387. skd_release_msix(skdev);
  3388. break;
  3389. case SKD_IRQ_MSI:
  3390. devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
  3391. pci_disable_msi(skdev->pdev);
  3392. break;
  3393. case SKD_IRQ_LEGACY:
  3394. devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
  3395. break;
  3396. default:
  3397. pr_err("(%s): wrong irq type %d!",
  3398. skd_name(skdev), skdev->irq_type);
  3399. break;
  3400. }
  3401. }
  3402. /*
  3403. *****************************************************************************
  3404. * CONSTRUCT
  3405. *****************************************************************************
  3406. */
  3407. static int skd_cons_skcomp(struct skd_device *skdev)
  3408. {
  3409. int rc = 0;
  3410. struct fit_completion_entry_v1 *skcomp;
  3411. u32 nbytes;
  3412. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  3413. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  3414. pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
  3415. skdev->name, __func__, __LINE__,
  3416. nbytes, SKD_N_COMPLETION_ENTRY);
  3417. skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
  3418. &skdev->cq_dma_address);
  3419. if (skcomp == NULL) {
  3420. rc = -ENOMEM;
  3421. goto err_out;
  3422. }
  3423. skdev->skcomp_table = skcomp;
  3424. skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
  3425. sizeof(*skcomp) *
  3426. SKD_N_COMPLETION_ENTRY);
  3427. err_out:
  3428. return rc;
  3429. }
  3430. static int skd_cons_skmsg(struct skd_device *skdev)
  3431. {
  3432. int rc = 0;
  3433. u32 i;
  3434. pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
  3435. skdev->name, __func__, __LINE__,
  3436. sizeof(struct skd_fitmsg_context),
  3437. skdev->num_fitmsg_context,
  3438. sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
  3439. skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
  3440. *skdev->num_fitmsg_context, GFP_KERNEL);
  3441. if (skdev->skmsg_table == NULL) {
  3442. rc = -ENOMEM;
  3443. goto err_out;
  3444. }
  3445. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3446. struct skd_fitmsg_context *skmsg;
  3447. skmsg = &skdev->skmsg_table[i];
  3448. skmsg->id = i + SKD_ID_FIT_MSG;
  3449. skmsg->state = SKD_MSG_STATE_IDLE;
  3450. skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
  3451. SKD_N_FITMSG_BYTES + 64,
  3452. &skmsg->mb_dma_address);
  3453. if (skmsg->msg_buf == NULL) {
  3454. rc = -ENOMEM;
  3455. goto err_out;
  3456. }
  3457. skmsg->offset = (u32)((u64)skmsg->msg_buf &
  3458. (~FIT_QCMD_BASE_ADDRESS_MASK));
  3459. skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3460. skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
  3461. FIT_QCMD_BASE_ADDRESS_MASK);
  3462. skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3463. skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
  3464. memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
  3465. skmsg->next = &skmsg[1];
  3466. }
  3467. /* Free list is in order starting with the 0th entry. */
  3468. skdev->skmsg_table[i - 1].next = NULL;
  3469. skdev->skmsg_free_list = skdev->skmsg_table;
  3470. err_out:
  3471. return rc;
  3472. }
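/*
 * Allocate a DMA-coherent array of n_sg FIT SG descriptors and
 * pre-link them through next_desc_ptr, so at submission time only the
 * byte counts and buffer addresses need to be filled in; the last
 * descriptor's next pointer is left zero to terminate the chain.
 */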
  3473. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  3474. u32 n_sg,
  3475. dma_addr_t *ret_dma_addr)
  3476. {
  3477. struct fit_sg_descriptor *sg_list;
  3478. u32 nbytes;
  3479. nbytes = sizeof(*sg_list) * n_sg;
  3480. sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
  3481. if (sg_list != NULL) {
  3482. uint64_t dma_address = *ret_dma_addr;
  3483. u32 i;
  3484. memset(sg_list, 0, nbytes);
  3485. for (i = 0; i < n_sg - 1; i++) {
  3486. uint64_t ndp_off;
  3487. ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
  3488. sg_list[i].next_desc_ptr = dma_address + ndp_off;
  3489. }
  3490. sg_list[i].next_desc_ptr = 0LL;
  3491. }
  3492. return sg_list;
  3493. }
  3494. static int skd_cons_skreq(struct skd_device *skdev)
  3495. {
  3496. int rc = 0;
  3497. u32 i;
  3498. pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
  3499. skdev->name, __func__, __LINE__,
  3500. sizeof(struct skd_request_context),
  3501. skdev->num_req_context,
  3502. sizeof(struct skd_request_context) * skdev->num_req_context);
  3503. skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
  3504. * skdev->num_req_context, GFP_KERNEL);
  3505. if (skdev->skreq_table == NULL) {
  3506. rc = -ENOMEM;
  3507. goto err_out;
  3508. }
  3509. pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
  3510. skdev->name, __func__, __LINE__,
  3511. skdev->sgs_per_request, sizeof(struct scatterlist),
  3512. skdev->sgs_per_request * sizeof(struct scatterlist));
  3513. for (i = 0; i < skdev->num_req_context; i++) {
  3514. struct skd_request_context *skreq;
  3515. skreq = &skdev->skreq_table[i];
  3516. skreq->id = i + SKD_ID_RW_REQUEST;
  3517. skreq->state = SKD_REQ_STATE_IDLE;
  3518. skreq->sg = kzalloc(sizeof(struct scatterlist) *
  3519. skdev->sgs_per_request, GFP_KERNEL);
  3520. if (skreq->sg == NULL) {
  3521. rc = -ENOMEM;
  3522. goto err_out;
  3523. }
  3524. sg_init_table(skreq->sg, skdev->sgs_per_request);
  3525. skreq->sksg_list = skd_cons_sg_list(skdev,
  3526. skdev->sgs_per_request,
  3527. &skreq->sksg_dma_address);
  3528. if (skreq->sksg_list == NULL) {
  3529. rc = -ENOMEM;
  3530. goto err_out;
  3531. }
  3532. skreq->next = &skreq[1];
  3533. }
  3534. /* Free list is in order starting with the 0th entry. */
  3535. skdev->skreq_table[i - 1].next = NULL;
  3536. skdev->skreq_free_list = skdev->skreq_table;
  3537. err_out:
  3538. return rc;
  3539. }
  3540. static int skd_cons_skspcl(struct skd_device *skdev)
  3541. {
  3542. int rc = 0;
  3543. u32 i, nbytes;
  3544. pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
  3545. skdev->name, __func__, __LINE__,
  3546. sizeof(struct skd_special_context),
  3547. skdev->n_special,
  3548. sizeof(struct skd_special_context) * skdev->n_special);
  3549. skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
  3550. * skdev->n_special, GFP_KERNEL);
  3551. if (skdev->skspcl_table == NULL) {
  3552. rc = -ENOMEM;
  3553. goto err_out;
  3554. }
  3555. for (i = 0; i < skdev->n_special; i++) {
  3556. struct skd_special_context *skspcl;
  3557. skspcl = &skdev->skspcl_table[i];
  3558. skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
  3559. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3560. skspcl->req.next = &skspcl[1].req;
  3561. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3562. skspcl->msg_buf =
  3563. pci_zalloc_consistent(skdev->pdev, nbytes,
  3564. &skspcl->mb_dma_address);
  3565. if (skspcl->msg_buf == NULL) {
  3566. rc = -ENOMEM;
  3567. goto err_out;
  3568. }
  3569. skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
  3570. SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
  3571. if (skspcl->req.sg == NULL) {
  3572. rc = -ENOMEM;
  3573. goto err_out;
  3574. }
  3575. skspcl->req.sksg_list = skd_cons_sg_list(skdev,
  3576. SKD_N_SG_PER_SPECIAL,
  3577. &skspcl->req.
  3578. sksg_dma_address);
  3579. if (skspcl->req.sksg_list == NULL) {
  3580. rc = -ENOMEM;
  3581. goto err_out;
  3582. }
  3583. }
  3584. /* Free list is in order starting with the 0th entry. */
  3585. skdev->skspcl_table[i - 1].req.next = NULL;
  3586. skdev->skspcl_free_list = skdev->skspcl_table;
  3587. return rc;
  3588. err_out:
  3589. return rc;
  3590. }
  3591. static int skd_cons_sksb(struct skd_device *skdev)
  3592. {
  3593. int rc = 0;
  3594. struct skd_special_context *skspcl;
  3595. u32 nbytes;
  3596. skspcl = &skdev->internal_skspcl;
  3597. skspcl->req.id = 0 + SKD_ID_INTERNAL;
  3598. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3599. nbytes = SKD_N_INTERNAL_BYTES;
  3600. skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
  3601. &skspcl->db_dma_address);
  3602. if (skspcl->data_buf == NULL) {
  3603. rc = -ENOMEM;
  3604. goto err_out;
  3605. }
  3606. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3607. skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
  3608. &skspcl->mb_dma_address);
  3609. if (skspcl->msg_buf == NULL) {
  3610. rc = -ENOMEM;
  3611. goto err_out;
  3612. }
  3613. skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
  3614. &skspcl->req.sksg_dma_address);
  3615. if (skspcl->req.sksg_list == NULL) {
  3616. rc = -ENOMEM;
  3617. goto err_out;
  3618. }
  3619. if (!skd_format_internal_skspcl(skdev)) {
  3620. rc = -EINVAL;
  3621. goto err_out;
  3622. }
  3623. err_out:
  3624. return rc;
  3625. }
  3626. static int skd_cons_disk(struct skd_device *skdev)
  3627. {
  3628. int rc = 0;
  3629. struct gendisk *disk;
  3630. struct request_queue *q;
  3631. unsigned long flags;
  3632. disk = alloc_disk(SKD_MINORS_PER_DEVICE);
  3633. if (!disk) {
  3634. rc = -ENOMEM;
  3635. goto err_out;
  3636. }
  3637. skdev->disk = disk;
  3638. sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
  3639. disk->major = skdev->major;
  3640. disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
  3641. disk->fops = &skd_blockdev_ops;
  3642. disk->private_data = skdev;
  3643. q = blk_init_queue(skd_request_fn, &skdev->lock);
  3644. if (!q) {
  3645. rc = -ENOMEM;
  3646. goto err_out;
  3647. }
  3648. skdev->queue = q;
  3649. disk->queue = q;
  3650. q->queuedata = skdev;
  3651. blk_queue_write_cache(q, true, true);
  3652. blk_queue_max_segments(q, skdev->sgs_per_request);
  3653. blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
        /* set sysfs optimal_io_size to 8K */
  3655. blk_queue_io_opt(q, 8192);
  3656. queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
  3657. queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
  3658. spin_lock_irqsave(&skdev->lock, flags);
  3659. pr_debug("%s:%s:%d stopping %s queue\n",
  3660. skdev->name, __func__, __LINE__, skdev->name);
  3661. blk_stop_queue(skdev->queue);
  3662. spin_unlock_irqrestore(&skdev->lock, flags);
  3663. err_out:
  3664. return rc;
  3665. }
  3666. #define SKD_N_DEV_TABLE 16u
  3667. static u32 skd_next_devno;
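/*
 * Allocate and initialize a skd_device from the module parameters,
 * then build all of its sub-structures (completion table, FIT message,
 * request and special-request tables, internal buffer, gendisk/queue).
 * Any failure tears the partial device down with skd_destruct().
 */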
  3668. static struct skd_device *skd_construct(struct pci_dev *pdev)
  3669. {
  3670. struct skd_device *skdev;
  3671. int blk_major = skd_major;
  3672. int rc;
  3673. skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
  3674. if (!skdev) {
  3675. pr_err(PFX "(%s): memory alloc failure\n",
  3676. pci_name(pdev));
  3677. return NULL;
  3678. }
  3679. skdev->state = SKD_DRVR_STATE_LOAD;
  3680. skdev->pdev = pdev;
  3681. skdev->devno = skd_next_devno++;
  3682. skdev->major = blk_major;
  3683. skdev->irq_type = skd_isr_type;
  3684. sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
  3685. skdev->dev_max_queue_depth = 0;
  3686. skdev->num_req_context = skd_max_queue_depth;
  3687. skdev->num_fitmsg_context = skd_max_queue_depth;
  3688. skdev->n_special = skd_max_pass_thru;
  3689. skdev->cur_max_queue_depth = 1;
  3690. skdev->queue_low_water_mark = 1;
  3691. skdev->proto_ver = 99;
  3692. skdev->sgs_per_request = skd_sgs_per_request;
  3693. skdev->dbg_level = skd_dbg_level;
  3694. atomic_set(&skdev->device_count, 0);
  3695. spin_lock_init(&skdev->lock);
  3696. INIT_WORK(&skdev->completion_worker, skd_completion_worker);
  3697. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3698. rc = skd_cons_skcomp(skdev);
  3699. if (rc < 0)
  3700. goto err_out;
  3701. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3702. rc = skd_cons_skmsg(skdev);
  3703. if (rc < 0)
  3704. goto err_out;
  3705. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3706. rc = skd_cons_skreq(skdev);
  3707. if (rc < 0)
  3708. goto err_out;
  3709. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3710. rc = skd_cons_skspcl(skdev);
  3711. if (rc < 0)
  3712. goto err_out;
  3713. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3714. rc = skd_cons_sksb(skdev);
  3715. if (rc < 0)
  3716. goto err_out;
  3717. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3718. rc = skd_cons_disk(skdev);
  3719. if (rc < 0)
  3720. goto err_out;
  3721. pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
  3722. return skdev;
  3723. err_out:
  3724. pr_debug("%s:%s:%d construct failed\n",
  3725. skdev->name, __func__, __LINE__);
  3726. skd_destruct(skdev);
  3727. return NULL;
  3728. }
  3729. /*
  3730. *****************************************************************************
  3731. * DESTRUCT (FREE)
  3732. *****************************************************************************
  3733. */
  3734. static void skd_free_skcomp(struct skd_device *skdev)
  3735. {
  3736. if (skdev->skcomp_table != NULL) {
  3737. u32 nbytes;
  3738. nbytes = sizeof(skdev->skcomp_table[0]) *
  3739. SKD_N_COMPLETION_ENTRY;
  3740. pci_free_consistent(skdev->pdev, nbytes,
  3741. skdev->skcomp_table, skdev->cq_dma_address);
  3742. }
  3743. skdev->skcomp_table = NULL;
  3744. skdev->cq_dma_address = 0;
  3745. }
  3746. static void skd_free_skmsg(struct skd_device *skdev)
  3747. {
  3748. u32 i;
  3749. if (skdev->skmsg_table == NULL)
  3750. return;
  3751. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3752. struct skd_fitmsg_context *skmsg;
  3753. skmsg = &skdev->skmsg_table[i];
  3754. if (skmsg->msg_buf != NULL) {
  3755. skmsg->msg_buf += skmsg->offset;
  3756. skmsg->mb_dma_address += skmsg->offset;
  3757. pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
  3758. skmsg->msg_buf,
  3759. skmsg->mb_dma_address);
  3760. }
  3761. skmsg->msg_buf = NULL;
  3762. skmsg->mb_dma_address = 0;
  3763. }
  3764. kfree(skdev->skmsg_table);
  3765. skdev->skmsg_table = NULL;
  3766. }
  3767. static void skd_free_sg_list(struct skd_device *skdev,
  3768. struct fit_sg_descriptor *sg_list,
  3769. u32 n_sg, dma_addr_t dma_addr)
  3770. {
  3771. if (sg_list != NULL) {
  3772. u32 nbytes;
  3773. nbytes = sizeof(*sg_list) * n_sg;
  3774. pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
  3775. }
  3776. }
  3777. static void skd_free_skreq(struct skd_device *skdev)
  3778. {
  3779. u32 i;
  3780. if (skdev->skreq_table == NULL)
  3781. return;
  3782. for (i = 0; i < skdev->num_req_context; i++) {
  3783. struct skd_request_context *skreq;
  3784. skreq = &skdev->skreq_table[i];
  3785. skd_free_sg_list(skdev, skreq->sksg_list,
  3786. skdev->sgs_per_request,
  3787. skreq->sksg_dma_address);
  3788. skreq->sksg_list = NULL;
  3789. skreq->sksg_dma_address = 0;
  3790. kfree(skreq->sg);
  3791. }
  3792. kfree(skdev->skreq_table);
  3793. skdev->skreq_table = NULL;
  3794. }
  3795. static void skd_free_skspcl(struct skd_device *skdev)
  3796. {
  3797. u32 i;
  3798. u32 nbytes;
  3799. if (skdev->skspcl_table == NULL)
  3800. return;
  3801. for (i = 0; i < skdev->n_special; i++) {
  3802. struct skd_special_context *skspcl;
  3803. skspcl = &skdev->skspcl_table[i];
  3804. if (skspcl->msg_buf != NULL) {
  3805. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3806. pci_free_consistent(skdev->pdev, nbytes,
  3807. skspcl->msg_buf,
  3808. skspcl->mb_dma_address);
  3809. }
  3810. skspcl->msg_buf = NULL;
  3811. skspcl->mb_dma_address = 0;
  3812. skd_free_sg_list(skdev, skspcl->req.sksg_list,
  3813. SKD_N_SG_PER_SPECIAL,
  3814. skspcl->req.sksg_dma_address);
  3815. skspcl->req.sksg_list = NULL;
  3816. skspcl->req.sksg_dma_address = 0;
  3817. kfree(skspcl->req.sg);
  3818. }
  3819. kfree(skdev->skspcl_table);
  3820. skdev->skspcl_table = NULL;
  3821. }
  3822. static void skd_free_sksb(struct skd_device *skdev)
  3823. {
  3824. struct skd_special_context *skspcl;
  3825. u32 nbytes;
  3826. skspcl = &skdev->internal_skspcl;
  3827. if (skspcl->data_buf != NULL) {
  3828. nbytes = SKD_N_INTERNAL_BYTES;
  3829. pci_free_consistent(skdev->pdev, nbytes,
  3830. skspcl->data_buf, skspcl->db_dma_address);
  3831. }
  3832. skspcl->data_buf = NULL;
  3833. skspcl->db_dma_address = 0;
  3834. if (skspcl->msg_buf != NULL) {
  3835. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3836. pci_free_consistent(skdev->pdev, nbytes,
  3837. skspcl->msg_buf, skspcl->mb_dma_address);
  3838. }
  3839. skspcl->msg_buf = NULL;
  3840. skspcl->mb_dma_address = 0;
  3841. skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
  3842. skspcl->req.sksg_dma_address);
  3843. skspcl->req.sksg_list = NULL;
  3844. skspcl->req.sksg_dma_address = 0;
  3845. }
  3846. static void skd_free_disk(struct skd_device *skdev)
  3847. {
  3848. struct gendisk *disk = skdev->disk;
  3849. if (disk && (disk->flags & GENHD_FL_UP))
  3850. del_gendisk(disk);
  3851. if (skdev->queue) {
  3852. blk_cleanup_queue(skdev->queue);
  3853. skdev->queue = NULL;
  3854. disk->queue = NULL;
  3855. }
  3856. put_disk(disk);
  3857. skdev->disk = NULL;
  3858. }
  3859. static void skd_destruct(struct skd_device *skdev)
  3860. {
  3861. if (skdev == NULL)
  3862. return;
  3863. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3864. skd_free_disk(skdev);
  3865. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3866. skd_free_sksb(skdev);
  3867. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3868. skd_free_skspcl(skdev);
  3869. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3870. skd_free_skreq(skdev);
  3871. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3872. skd_free_skmsg(skdev);
  3873. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3874. skd_free_skcomp(skdev);
  3875. pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
  3876. kfree(skdev);
  3877. }
  3878. /*
  3879. *****************************************************************************
  3880. * BLOCK DEVICE (BDEV) GLUE
  3881. *****************************************************************************
  3882. */
  3883. static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  3884. {
  3885. struct skd_device *skdev;
  3886. u64 capacity;
  3887. skdev = bdev->bd_disk->private_data;
  3888. pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
  3889. skdev->name, __func__, __LINE__,
  3890. bdev->bd_disk->disk_name, current->comm);
  3891. if (skdev->read_cap_is_valid) {
  3892. capacity = get_capacity(skdev->disk);
  3893. geo->heads = 64;
  3894. geo->sectors = 255;
  3895. geo->cylinders = (capacity) / (255 * 64);
  3896. return 0;
  3897. }
  3898. return -EIO;
  3899. }
  3900. static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
  3901. {
  3902. pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
  3903. device_add_disk(parent, skdev->disk);
  3904. return 0;
  3905. }
  3906. static const struct block_device_operations skd_blockdev_ops = {
  3907. .owner = THIS_MODULE,
  3908. .ioctl = skd_bdev_ioctl,
  3909. .getgeo = skd_bdev_getgeo,
  3910. };
  3911. /*
  3912. *****************************************************************************
  3913. * PCIe DRIVER GLUE
  3914. *****************************************************************************
  3915. */
  3916. static const struct pci_device_id skd_pci_tbl[] = {
  3917. { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
  3918. PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
  3919. { 0 } /* terminate list */
  3920. };
  3921. MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
  3922. static char *skd_pci_info(struct skd_device *skdev, char *str)
  3923. {
  3924. int pcie_reg;
  3925. strcpy(str, "PCIe (");
  3926. pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
  3927. if (pcie_reg) {
  3928. char lwstr[6];
  3929. uint16_t pcie_lstat, lspeed, lwidth;
  3930. pcie_reg += 0x12;
  3931. pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
  3932. lspeed = pcie_lstat & (0xF);
  3933. lwidth = (pcie_lstat & 0x3F0) >> 4;
  3934. if (lspeed == 1)
  3935. strcat(str, "2.5GT/s ");
  3936. else if (lspeed == 2)
  3937. strcat(str, "5.0GT/s ");
  3938. else
  3939. strcat(str, "<unknown> ");
  3940. snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
  3941. strcat(str, lwstr);
  3942. }
  3943. return str;
  3944. }
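/*
 * PCI probe: enable the device, set the DMA mask (64-bit with a 32-bit
 * fallback), register the block major on first use, construct the
 * skd_device, map the BARs, acquire interrupts, start the timer and
 * the device, and attach the gendisk once the drive reports online.
 */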
  3945. static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  3946. {
  3947. int i;
  3948. int rc = 0;
  3949. char pci_str[32];
  3950. struct skd_device *skdev;
  3951. pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
  3952. DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
  3953. pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
  3954. pci_name(pdev), pdev->vendor, pdev->device);
  3955. rc = pci_enable_device(pdev);
  3956. if (rc)
  3957. return rc;
  3958. rc = pci_request_regions(pdev, DRV_NAME);
  3959. if (rc)
  3960. goto err_out;
  3961. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  3962. if (!rc) {
  3963. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
  3964. pr_err("(%s): consistent DMA mask error %d\n",
  3965. pci_name(pdev), rc);
  3966. }
  3967. } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  3969. if (rc) {
  3970. pr_err("(%s): DMA mask error %d\n",
  3971. pci_name(pdev), rc);
  3972. goto err_out_regions;
  3973. }
  3974. }
  3975. if (!skd_major) {
  3976. rc = register_blkdev(0, DRV_NAME);
  3977. if (rc < 0)
  3978. goto err_out_regions;
  3979. BUG_ON(!rc);
  3980. skd_major = rc;
  3981. }
  3982. skdev = skd_construct(pdev);
  3983. if (skdev == NULL) {
  3984. rc = -ENOMEM;
  3985. goto err_out_regions;
  3986. }
  3987. skd_pci_info(skdev, pci_str);
  3988. pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
  3989. pci_set_master(pdev);
  3990. rc = pci_enable_pcie_error_reporting(pdev);
  3991. if (rc) {
  3992. pr_err(
  3993. "(%s): bad enable of PCIe error reporting rc=%d\n",
  3994. skd_name(skdev), rc);
  3995. skdev->pcie_error_reporting_is_enabled = 0;
  3996. } else
  3997. skdev->pcie_error_reporting_is_enabled = 1;
  3998. pci_set_drvdata(pdev, skdev);
  3999. for (i = 0; i < SKD_MAX_BARS; i++) {
  4000. skdev->mem_phys[i] = pci_resource_start(pdev, i);
  4001. skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
  4002. skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
  4003. skdev->mem_size[i]);
  4004. if (!skdev->mem_map[i]) {
  4005. pr_err("(%s): Unable to map adapter memory!\n",
  4006. skd_name(skdev));
  4007. rc = -ENODEV;
  4008. goto err_out_iounmap;
  4009. }
  4010. pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
  4011. skdev->name, __func__, __LINE__,
  4012. skdev->mem_map[i],
  4013. (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
  4014. }
  4015. rc = skd_acquire_irq(skdev);
  4016. if (rc) {
  4017. pr_err("(%s): interrupt resource error %d\n",
  4018. skd_name(skdev), rc);
  4019. goto err_out_iounmap;
  4020. }
  4021. rc = skd_start_timer(skdev);
  4022. if (rc)
  4023. goto err_out_timer;
  4024. init_waitqueue_head(&skdev->waitq);
  4025. skd_start_device(skdev);
  4026. rc = wait_event_interruptible_timeout(skdev->waitq,
  4027. (skdev->gendisk_on),
  4028. (SKD_START_WAIT_SECONDS * HZ));
  4029. if (skdev->gendisk_on > 0) {
  4030. /* device came on-line after reset */
  4031. skd_bdev_attach(&pdev->dev, skdev);
  4032. rc = 0;
  4033. } else {
  4034. /* we timed out, something is wrong with the device,
  4035. don't add the disk structure */
  4036. pr_err(
  4037. "(%s): error: waiting for s1120 timed out %d!\n",
  4038. skd_name(skdev), rc);
                /* in case of no error, we time out with ENXIO */
  4040. if (!rc)
  4041. rc = -ENXIO;
  4042. goto err_out_timer;
  4043. }
  4044. #ifdef SKD_VMK_POLL_HANDLER
  4045. if (skdev->irq_type == SKD_IRQ_MSIX) {
  4046. /* MSIX completion handler is being used for coredump */
  4047. vmklnx_scsi_register_poll_handler(skdev->scsi_host,
  4048. skdev->msix_entries[5].vector,
  4049. skd_comp_q, skdev);
  4050. } else {
  4051. vmklnx_scsi_register_poll_handler(skdev->scsi_host,
  4052. skdev->pdev->irq, skd_isr,
  4053. skdev);
  4054. }
  4055. #endif /* SKD_VMK_POLL_HANDLER */
	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}
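
/*
 * skd_pci_remove() - undo skd_pci_probe() in reverse order: stop the
 * device and timer, release the interrupt, unmap the BARs, disable
 * PCIe error reporting, tear down the skd_device, and hand the PCI
 * regions and device back to the core.
 */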
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap((u32 *)skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
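
/*
 * Power management: suspend quiesces the device and frees its
 * resources much like remove does, but keeps the skd_device itself
 * and saves PCI config space so that resume can rebuild the mappings
 * and restart the device.
 */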
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap((u32 *)skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
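
/*
 * skd_pci_resume() mirrors the probe path: re-enable the device,
 * reclaim the regions, reprogram the DMA masks, remap the BARs,
 * reacquire the interrupt and timer, and restart the device.
 */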
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -1;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}
	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skdev->name, rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
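
/*
 * Shutdown only needs to quiesce the hardware; no resources are
 * released because the system is going down.
 */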
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}

	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}
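
/* PCI driver glue tying the entry points above to the device table. */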
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};
/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
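
/*
 * skd_name() - format "<name>:<serial>:[<pci address>]" into
 * skdev->id_str for use in log messages; the serial number reads
 * "??" until valid INQUIRY data has been received from the device.
 */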
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}
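
/*
 * The *_to_str() helpers below translate drive, driver, FIT message
 * and request state codes into short strings for the logging
 * routines; unknown values are reported as "???".
 */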
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}
static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}
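
/*
 * Debug-only dumps of the device, FIT message and request contexts;
 * all output goes through pr_debug().
 */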
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
	pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
		 skdev->name, __func__, __LINE__,
		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		 skd_skdev_state_to_str(skdev->state), skdev->state);
	pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->in_flight, skdev->cur_max_queue_depth,
		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
		 skdev->name, __func__, __LINE__,
		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		 skmsg->id, skmsg->length);
}
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		 skdev->name, __func__, __LINE__,
		 skd_skreq_state_to_str(skreq->state), skreq->state,
		 skreq->id, skreq->fitmsg_id);
	pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_debug("%s:%s:%d "
			 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count,
			 (int)rq_data_dir(req));
	} else
		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
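
/*
 * skd_init() - validate the module parameters, resetting anything out
 * of range back to its default, then register the PCI driver.
 */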
static int __init skd_init(void)
{
	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}
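
/*
 * skd_exit() - unregister the PCI driver and give back the block
 * major number if one was allocated in skd_pci_probe().
 */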
static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);