/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to [email protected]
 *
 */
#if !defined(_SMARTPQI_H)
#define _SMARTPQI_H

/* all on-the-wire/MMIO structures below are packed, no compiler padding */
#pragma pack(1)

/* expected contents of the signature register (8 ASCII bytes) */
#define PQI_DEVICE_SIGNATURE "PQI DREG"

/*
 * PQI device register block, mapped at PQI_DEVICE_REGISTERS_OFFSET from
 * PCIe BAR 0 (see struct pqi_ctrl_registers below).
 * This structure is defined by the PQI specification.
 */
struct pqi_device_registers {
	__le64 signature;			/* must read "PQI DREG" */
	u8 function_and_status_code;
	u8 reserved[7];
	u8 max_admin_iq_elements;
	u8 max_admin_oq_elements;
	u8 admin_iq_element_length;		/* in 16-byte units */
	u8 admin_oq_element_length;		/* in 16-byte units */
	__le16 max_reset_timeout;		/* in 100-millisecond units */
	u8 reserved1[2];
	__le32 legacy_intx_status;
	__le32 legacy_intx_mask_set;
	__le32 legacy_intx_mask_clear;
	u8 reserved2[28];
	__le32 device_status;
	u8 reserved3[4];
	__le64 admin_iq_pi_offset;		/* admin inbound queue producer index */
	__le64 admin_oq_ci_offset;		/* admin outbound queue consumer index */
	__le64 admin_iq_element_array_addr;
	__le64 admin_oq_element_array_addr;
	__le64 admin_iq_ci_addr;
	__le64 admin_oq_pi_addr;
	u8 admin_iq_num_elements;
	u8 admin_oq_num_elements;
	__le16 admin_queue_int_msg_num;
	u8 reserved4[4];
	__le32 device_error;
	u8 reserved5[4];
	__le64 error_details;
	__le32 device_reset;
	__le32 power_action;
	u8 reserved6[104];
};
/*
 * controller registers
 *
 * These are defined by the PMC implementation.
 *
 * Some registers (those named sis_*) are only used when in
 * legacy SIS mode before we transition the controller into
 * PQI mode.  There are a number of other SIS mode registers,
 * but we don't use them, so only the SIS registers that we
 * care about are defined here.  The offsets mentioned in the
 * comments are the offsets from the PCIe BAR 0.
 */
struct pqi_ctrl_registers {
	u8 reserved[0x20];
	__le32 sis_host_to_ctrl_doorbell;		/* 20h */
	u8 reserved1[0x34 - (0x20 + sizeof(__le32))];
	__le32 sis_interrupt_mask;			/* 34h */
	u8 reserved2[0x9c - (0x34 + sizeof(__le32))];
	__le32 sis_ctrl_to_host_doorbell;		/* 9Ch */
	u8 reserved3[0xa0 - (0x9c + sizeof(__le32))];
	__le32 sis_ctrl_to_host_doorbell_clear;		/* A0h */
	u8 reserved4[0xb0 - (0xa0 + sizeof(__le32))];
	__le32 sis_driver_scratch;			/* B0h */
	u8 reserved5[0xbc - (0xb0 + sizeof(__le32))];
	__le32 sis_firmware_status;			/* BCh */
	u8 reserved6[0x1000 - (0xbc + sizeof(__le32))];
	__le32 sis_mailbox[8];				/* 1000h */
	u8 reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
	/*
	 * The PQI spec states that the PQI registers should be at
	 * offset 0 from the PCIe BAR 0.  However, we can't map
	 * them at offset 0 because that would break compatibility
	 * with the SIS registers.  So we map them at offset 4000h.
	 */
	struct pqi_device_registers pqi_registers;	/* 4000h */
};

/* BAR 0 offset of struct pqi_device_registers, per the layout above */
#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
/* which path a request is routed down on the controller */
enum pqi_io_path {
	RAID_PATH = 0,	/* normal RAID stack path */
	AIO_PATH = 1	/* "I/O accelerator" (AIO) path */
};

/* one scatter-gather element as consumed by the controller */
struct pqi_sg_descriptor {
	__le64 address;
	__le32 length;
	__le32 flags;	/* CISS_SG_* bits below */
};

/* manifest constants for the flags field of pqi_sg_descriptor */
#define CISS_SG_LAST 0x40000000
#define CISS_SG_CHAIN 0x80000000

/* common header that begins every IU (Information Unit) we exchange */
struct pqi_iu_header {
	u8 iu_type;
	u8 reserved;
	__le16 iu_length;		/* in bytes - does not include the length */
					/* of this header */
	__le16 response_queue_id;	/* specifies the OQ where the */
					/* response IU is to be delivered */
	u8 work_area[2];		/* reserved for driver use */
};

/*
 * According to the PQI spec, the IU header is only the first 4 bytes of our
 * pqi_iu_header structure.
 */
#define PQI_REQUEST_HEADER_LENGTH 4
/*
 * General admin request IU.  function_code (one of the
 * PQI_GENERAL_ADMIN_FUNCTION_* values) selects which member of the
 * data union is valid.
 */
struct pqi_general_admin_request {
	struct pqi_iu_header header;
	__le16 request_id;
	u8 function_code;
	union {
		struct {
			u8 reserved[33];
			__le32 buffer_length;
			struct pqi_sg_descriptor sg_descriptor;
		} report_device_capability;

		/* create an operational inbound (host->ctrl) queue */
		struct {
			u8 reserved;
			__le16 queue_id;
			u8 reserved1[2];
			__le64 element_array_addr;
			__le64 ci_addr;		/* where ctrl posts consumer index */
			__le16 num_elements;
			__le16 element_length;
			u8 queue_protocol;
			u8 reserved2[23];
			__le32 vendor_specific;
		} create_operational_iq;

		/* create an operational outbound (ctrl->host) queue */
		struct {
			u8 reserved;
			__le16 queue_id;
			u8 reserved1[2];
			__le64 element_array_addr;
			__le64 pi_addr;		/* where ctrl posts producer index */
			__le16 num_elements;
			__le16 element_length;
			u8 queue_protocol;
			u8 reserved2[3];
			__le16 int_msg_num;
			__le16 coalescing_count;
			__le32 min_coalescing_time;
			__le32 max_coalescing_time;
			u8 reserved3[8];
			__le32 vendor_specific;
		} create_operational_oq;

		struct {
			u8 reserved;
			__le16 queue_id;
			u8 reserved1[50];
		} delete_operational_queue;

		struct {
			u8 reserved;
			__le16 queue_id;
			u8 reserved1[46];
			__le32 vendor_specific;
		} change_operational_iq_properties;
	} data;
};
/* response IU for struct pqi_general_admin_request */
struct pqi_general_admin_response {
	struct pqi_iu_header header;
	__le16 request_id;	/* matches request_id of the originating request */
	u8 function_code;
	u8 status;		/* PQI_GENERAL_ADMIN_STATUS_* */
	union {
		struct {
			u8 status_descriptor[4];
			__le64 iq_pi_offset;	/* BAR offset of the new IQ's PI register */
			u8 reserved[40];
		} create_operational_iq;

		struct {
			u8 status_descriptor[4];
			__le64 oq_ci_offset;	/* BAR offset of the new OQ's CI register */
			u8 reserved[40];
		} create_operational_oq;
	} data;
};

/* per-protocol IU limits reported by the controller */
struct pqi_iu_layer_descriptor {
	u8 inbound_spanning_supported : 1;
	u8 reserved : 7;
	u8 reserved1[5];
	__le16 max_inbound_iu_length;
	u8 outbound_spanning_supported : 1;
	u8 reserved2 : 7;
	u8 reserved3[5];
	__le16 max_outbound_iu_length;
};

/*
 * Buffer returned by the REPORT DEVICE CAPABILITY admin function:
 * queue-count/element-size limits plus one IU layer descriptor per
 * protocol.
 */
struct pqi_device_capability {
	__le16 data_length;
	u8 reserved[6];
	u8 iq_arbitration_priority_support_bitmask;
	u8 maximum_aw_a;
	u8 maximum_aw_b;
	u8 maximum_aw_c;
	u8 max_arbitration_burst : 3;
	u8 reserved1 : 4;
	u8 iqa : 1;
	u8 reserved2[2];
	u8 iq_freeze : 1;
	u8 reserved3 : 7;
	__le16 max_inbound_queues;
	__le16 max_elements_per_iq;
	u8 reserved4[4];
	__le16 max_iq_element_length;
	__le16 min_iq_element_length;
	u8 reserved5[2];
	__le16 max_outbound_queues;
	__le16 max_elements_per_oq;
	__le16 intr_coalescing_time_granularity;
	__le16 max_oq_element_length;
	__le16 min_oq_element_length;
	u8 reserved6[24];
	struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
};
/* SG elements carried inline in a request IU; longer lists are chained */
#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS 4

/* SCSI command sent down the RAID path (PQI_REQUEST_IU_RAID_PATH_IO) */
struct pqi_raid_path_request {
	struct pqi_iu_header header;
	__le16 request_id;
	__le16 nexus_id;
	__le32 buffer_length;
	u8 lun_number[8];
	__le16 protocol_specific;
	u8 data_direction : 2;		/* SOP_*_FLAG direction codes */
	u8 partial : 1;
	u8 reserved1 : 4;
	u8 fence : 1;
	__le16 error_index;		/* slot in the error buffer for this request */
	u8 reserved2;
	u8 task_attribute : 3;		/* SOP_TASK_ATTRIBUTE_* */
	u8 command_priority : 4;
	u8 reserved3 : 1;
	u8 reserved4 : 2;
	u8 additional_cdb_bytes_usage : 3;	/* SOP_ADDITIONAL_CDB_BYTES_* */
	u8 reserved5 : 3;
	u8 cdb[32];
	struct pqi_sg_descriptor
		sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};

/* SCSI command sent down the AIO path (PQI_REQUEST_IU_AIO_PATH_IO) */
struct pqi_aio_path_request {
	struct pqi_iu_header header;
	__le16 request_id;
	u8 reserved1[2];
	__le32 nexus_id;
	__le32 buffer_length;
	u8 data_direction : 2;		/* SOP_*_FLAG direction codes */
	u8 partial : 1;
	u8 memory_type : 1;
	u8 fence : 1;
	u8 encryption_enable : 1;
	u8 reserved2 : 2;
	u8 task_attribute : 3;		/* SOP_TASK_ATTRIBUTE_* */
	u8 command_priority : 4;
	u8 reserved3 : 1;
	__le16 data_encryption_key_index;
	__le32 encrypt_tweak_lower;
	__le32 encrypt_tweak_upper;
	u8 cdb[16];
	__le16 error_index;		/* slot in the error buffer for this request */
	u8 num_sg_descriptors;
	u8 cdb_length;
	u8 lun_number[8];
	u8 reserved4[4];
	struct pqi_sg_descriptor
		sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
/* completion IU for RAID/AIO path requests */
struct pqi_io_response {
	struct pqi_iu_header header;
	__le16 request_id;
	__le16 error_index;	/* echoes error_index of the request */
	u8 reserved2[4];
};

/* vendor event-configuration request (report or set) */
struct pqi_general_management_request {
	struct pqi_iu_header header;
	__le16 request_id;
	union {
		struct {
			u8 reserved[2];
			__le32 buffer_length;
			struct pqi_sg_descriptor sg_descriptors[3];
		} report_event_configuration;

		struct {
			__le16 global_event_oq_id;
			__le32 buffer_length;
			struct pqi_sg_descriptor sg_descriptors[3];
		} set_event_configuration;
	} data;
};

/* one event type -> outbound queue binding */
struct pqi_event_descriptor {
	u8 event_type;
	u8 reserved;
	__le16 oq_id;
};

/*
 * Variable-length event-configuration buffer.
 * NOTE(review): descriptors[1] is the pre-C99 trailing-array idiom; a
 * flexible array member (descriptors[]) would be the modern form, but
 * changing it alters sizeof() and any buffer-size math based on it.
 */
struct pqi_event_config {
	u8 reserved[2];
	u8 num_event_descriptors;
	u8 reserved1;
	struct pqi_event_descriptor descriptors[1];
};

#define PQI_MAX_EVENT_DESCRIPTORS 255

/* asynchronous event delivered by the controller on the event queue */
struct pqi_event_response {
	struct pqi_iu_header header;
	u8 event_type;			/* PQI_EVENT_TYPE_* */
	u8 reserved2 : 7;
	u8 request_acknowlege : 1;	/* [sic] set if ctrl wants an ack IU back */
	__le16 event_id;
	__le32 additional_event_id;
	u8 data[16];
};

/* host -> controller acknowledgement of an event */
struct pqi_event_acknowledge_request {
	struct pqi_iu_header header;
	u8 event_type;
	u8 reserved2;
	__le16 event_id;
	__le32 additional_event_id;
};
/* task-management request IU (PQI_REQUEST_IU_TASK_MANAGEMENT) */
struct pqi_task_management_request {
	struct pqi_iu_header header;
	__le16 request_id;
	__le16 nexus_id;
	u8 reserved[4];
	u8 lun_number[8];
	__le16 protocol_specific;
	__le16 outbound_queue_id_to_manage;
	__le16 request_id_to_manage;
	u8 task_management_function;	/* e.g. SOP_TASK_MANAGEMENT_LUN_RESET */
	u8 reserved2 : 7;
	u8 fence : 1;
};

#define SOP_TASK_MANAGEMENT_LUN_RESET 0x8

/* task-management response IU (PQI_RESPONSE_IU_TASK_MANAGEMENT) */
struct pqi_task_management_response {
	struct pqi_iu_header header;
	__le16 request_id;
	__le16 nexus_id;
	u8 additional_response_info[3];
	u8 response_code;
};

/* error detail posted by the controller for a failed AIO path request */
struct pqi_aio_error_info {
	u8 status;		/* PQI_AIO_STATUS_* */
	u8 service_response;	/* PQI_AIO_SERV_RESPONSE_* */
	u8 data_present;
	u8 reserved;
	__le32 residual_count;
	__le16 data_length;
	__le16 reserved1;
	u8 data[256];		/* sense/response data */
};

/* error detail posted by the controller for a failed RAID path request */
struct pqi_raid_error_info {
	u8 data_in_result;	/* PQI_DATA_IN_OUT_* */
	u8 data_out_result;	/* PQI_DATA_IN_OUT_* */
	u8 reserved[3];
	u8 status;
	__le16 status_qualifier;
	__le16 sense_data_length;
	__le16 response_data_length;
	__le32 data_in_transferred;
	__le32 data_out_transferred;
	u8 data[256];		/* sense/response data */
};
/* IU type codes: host -> controller requests */
#define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13
#define PQI_REQUEST_IU_RAID_PATH_IO 0x14
#define PQI_REQUEST_IU_AIO_PATH_IO 0x15
#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6

/* IU type codes: controller -> host responses */
#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
#define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93
#define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0
#define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS 0xf0
#define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS 0xf1
#define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR 0xf2
#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3
#define PQI_RESPONSE_IU_AIO_PATH_DISABLED 0xf4
#define PQI_RESPONSE_IU_VENDOR_EVENT 0xf5

/* function_code values for struct pqi_general_admin_request */
#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY 0x0
#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ 0x10
#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ 0x11
#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ 0x12
#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ 0x13
#define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY 0x14

#define PQI_GENERAL_ADMIN_STATUS_SUCCESS 0x0

#define PQI_IQ_PROPERTY_IS_AIO_QUEUE 0x1

#define PQI_GENERAL_ADMIN_IU_LENGTH 0x3c
#define PQI_PROTOCOL_SOP 0x0

/* data_in_result / data_out_result codes (struct pqi_raid_error_info) */
#define PQI_DATA_IN_OUT_GOOD 0x0
#define PQI_DATA_IN_OUT_UNDERFLOW 0x1
#define PQI_DATA_IN_OUT_BUFFER_ERROR 0x40
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW 0x41
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA 0x42
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE 0x43
#define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR 0x60
#define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT 0x61
#define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED 0x62
#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED 0x63
#define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED 0x64
#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST 0x65
#define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION 0x66
#define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED 0x67
#define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ 0x6F
#define PQI_DATA_IN_OUT_ERROR 0xf0
#define PQI_DATA_IN_OUT_PROTOCOL_ERROR 0xf1
#define PQI_DATA_IN_OUT_HARDWARE_ERROR 0xf2
#define PQI_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3
#define PQI_DATA_IN_OUT_ABORTED 0xf4
#define PQI_DATA_IN_OUT_TIMEOUT 0xf5

/* CISS command status codes */
#define CISS_CMD_STATUS_SUCCESS 0x0
#define CISS_CMD_STATUS_TARGET_STATUS 0x1
#define CISS_CMD_STATUS_DATA_UNDERRUN 0x2
#define CISS_CMD_STATUS_DATA_OVERRUN 0x3
#define CISS_CMD_STATUS_INVALID 0x4
#define CISS_CMD_STATUS_PROTOCOL_ERROR 0x5
#define CISS_CMD_STATUS_HARDWARE_ERROR 0x6
#define CISS_CMD_STATUS_CONNECTION_LOST 0x7
#define CISS_CMD_STATUS_ABORTED 0x8
#define CISS_CMD_STATUS_ABORT_FAILED 0x9
#define CISS_CMD_STATUS_UNSOLICITED_ABORT 0xa
#define CISS_CMD_STATUS_TIMEOUT 0xb
#define CISS_CMD_STATUS_UNABORTABLE 0xc
#define CISS_CMD_STATUS_TMF 0xd
#define CISS_CMD_STATUS_AIO_DISABLED 0xe

/* event queue sizing */
#define PQI_NUM_EVENT_QUEUE_ELEMENTS 32
#define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response)

/* event_type values for struct pqi_event_response */
#define PQI_EVENT_TYPE_HOTPLUG 0x1
#define PQI_EVENT_TYPE_HARDWARE 0x2
#define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4
#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
#define PQI_EVENT_TYPE_HEARTBEAT 0xff
#pragma pack()

/* one error-buffer slot per outstanding request */
#define PQI_ERROR_BUFFER_ELEMENT_LENGTH \
	sizeof(struct pqi_raid_error_info)

/* these values are based on our implementation */
#define PQI_ADMIN_IQ_NUM_ELEMENTS 8
#define PQI_ADMIN_OQ_NUM_ELEMENTS 20
#define PQI_ADMIN_IQ_ELEMENT_LENGTH 64
#define PQI_ADMIN_OQ_ELEMENT_LENGTH 64

#define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH 128
#define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH 16

#define PQI_MIN_MSIX_VECTORS 1
#define PQI_MAX_MSIX_VECTORS 64

/* these values are defined by the PQI spec */
#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE 255
#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535
#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT 64
#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT 16
#define PQI_ADMIN_INDEX_ALIGNMENT 64
#define PQI_OPERATIONAL_INDEX_ALIGNMENT 4

#define PQI_MIN_OPERATIONAL_QUEUE_ID 1
#define PQI_MAX_OPERATIONAL_QUEUE_ID 65535

/* service_response values (struct pqi_aio_error_info) */
#define PQI_AIO_SERV_RESPONSE_COMPLETE 0
#define PQI_AIO_SERV_RESPONSE_FAILURE 1
#define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE 2
#define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED 3
#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4
#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5

/* status values (struct pqi_aio_error_info) */
#define PQI_AIO_STATUS_IO_ERROR 0x1
#define PQI_AIO_STATUS_IO_ABORTED 0x2
#define PQI_AIO_STATUS_NO_PATH_TO_DEVICE 0x3
#define PQI_AIO_STATUS_INVALID_DEVICE 0x4
#define PQI_AIO_STATUS_AIO_PATH_DISABLED 0xe
#define PQI_AIO_STATUS_UNDERRUN 0x51
#define PQI_AIO_STATUS_OVERRUN 0x75

/* type of a queue producer/consumer index */
typedef u32 pqi_index_t;

/* SOP data direction flags */
#define SOP_NO_DIRECTION_FLAG 0
#define SOP_WRITE_FLAG 1	/* host writes data to Data-Out */
				/* buffer */
#define SOP_READ_FLAG 2		/* host receives data from Data-In */
				/* buffer */
#define SOP_BIDIRECTIONAL 3	/* data is transferred from the */
				/* Data-Out buffer and data is */
				/* transferred to the Data-In buffer */

/* SOP task attributes */
#define SOP_TASK_ATTRIBUTE_SIMPLE 0
#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1
#define SOP_TASK_ATTRIBUTE_ORDERED 2
#define SOP_TASK_ATTRIBUTE_ACA 4

/* SOP task-management response codes */
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8

/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0	/* 16-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_4 1	/* 20-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_8 2	/* 24-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_12 3	/* 28-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_16 4	/* 32-byte CDB */
/*
 * The purpose of this structure is to obtain proper alignment of objects in
 * an admin queue pair.
 */
struct pqi_admin_queues_aligned {
	__aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
		u8 iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
		[PQI_ADMIN_IQ_NUM_ELEMENTS];
	__aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
		u8 oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
		[PQI_ADMIN_OQ_NUM_ELEMENTS];
	__aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
	__aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
};

/* host-side bookkeeping for the admin queue pair */
struct pqi_admin_queues {
	void *iq_element_array;
	void *oq_element_array;
	volatile pqi_index_t *iq_ci;	/* written by the controller via DMA */
	volatile pqi_index_t *oq_pi;	/* written by the controller via DMA */
	dma_addr_t iq_element_array_bus_addr;
	dma_addr_t oq_element_array_bus_addr;
	dma_addr_t iq_ci_bus_addr;
	dma_addr_t oq_pi_bus_addr;
	__le32 __iomem *iq_pi;		/* producer-index register in BAR space */
	pqi_index_t iq_pi_copy;		/* cached host copy of *iq_pi */
	__le32 __iomem *oq_ci;		/* consumer-index register in BAR space */
	pqi_index_t oq_ci_copy;		/* cached host copy of *oq_ci */
	struct task_struct *task;
	u16 int_msg_num;
};
/*
 * One operational queue group: two inbound queues (indexed by
 * RAID_PATH/AIO_PATH) sharing a single outbound (completion) queue.
 */
struct pqi_queue_group {
	struct pqi_ctrl_info *ctrl_info;	/* backpointer */
	u16 iq_id[2];				/* indexed by RAID_PATH/AIO_PATH */
	u16 oq_id;
	u16 int_msg_num;
	void *iq_element_array[2];
	void *oq_element_array;
	dma_addr_t iq_element_array_bus_addr[2];
	dma_addr_t oq_element_array_bus_addr;
	__le32 __iomem *iq_pi[2];
	pqi_index_t iq_pi_copy[2];
	volatile pqi_index_t *iq_ci[2];		/* written by the controller via DMA */
	volatile pqi_index_t *oq_pi;		/* written by the controller via DMA */
	dma_addr_t iq_ci_bus_addr[2];
	dma_addr_t oq_pi_bus_addr;
	__le32 __iomem *oq_ci;
	pqi_index_t oq_ci_copy;
	spinlock_t submit_lock[2];		/* protect submission queue */
	struct list_head request_list[2];
};

/* host-side bookkeeping for the dedicated event outbound queue */
struct pqi_event_queue {
	u16 oq_id;
	u16 int_msg_num;
	void *oq_element_array;
	volatile pqi_index_t *oq_pi;	/* written by the controller via DMA */
	dma_addr_t oq_element_array_bus_addr;
	dma_addr_t oq_pi_bus_addr;
	__le32 __iomem *oq_ci;
	pqi_index_t oq_ci_copy;
};

#define PQI_DEFAULT_QUEUE_GROUP 0
#define PQI_MAX_QUEUE_GROUPS PQI_MAX_MSIX_VECTORS

/* per-request encryption parameters copied into an AIO request IU */
struct pqi_encryption_info {
	u16 data_encryption_key_index;
	u32 encrypt_tweak_lower;
	u32 encrypt_tweak_upper;
};
#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)

#define RAID_MAP_MAX_ENTRIES 1024

/* pseudo-SCSI-bus numbering used for device addressing */
#define PQI_PHYSICAL_DEVICE_BUS 0
#define PQI_RAID_VOLUME_BUS 1
#define PQI_HBA_BUS 2
#define PQI_MAX_BUS PQI_HBA_BUS

#pragma pack(1)

/* header of a CISS REPORT (LOGICAL|PHYSICAL) LUNS response buffer */
struct report_lun_header {
	__be32 list_length;	/* in bytes, not counting this header */
	u8 extended_response;
	u8 reserved[3];
};

struct report_log_lun_extended_entry {
	u8 lunid[8];
	u8 volume_id[16];
};

/*
 * Variable-length response buffer; lun_entries[1] is the pre-C99
 * trailing-array idiom (actual count comes from header.list_length).
 */
struct report_log_lun_extended {
	struct report_lun_header header;
	struct report_log_lun_extended_entry lun_entries[1];
};

struct report_phys_lun_extended_entry {
	u8 lunid[8];
	__be64 wwid;
	u8 device_type;
	u8 device_flags;	/* REPORT_PHYS_LUN_DEV_FLAG_* */
	u8 lun_count;		/* number of LUNs in a multi-LUN device */
	u8 redundant_paths;
	u32 aio_handle;		/* NOTE(review): unannotated endianness in a */
				/* packed wire struct - presumably little- */
				/* endian (__le32); confirm before changing */
};

/* for device_flags field of struct report_phys_lun_extended_entry */
#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK 0x1
#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8

/* variable-length response buffer; see report_log_lun_extended note */
struct report_phys_lun_extended {
	struct report_lun_header header;
	struct report_phys_lun_extended_entry lun_entries[1];
};

/* per-physical-disk entry of struct raid_map */
struct raid_map_disk_data {
	u32 aio_handle;		/* NOTE(review): endianness unannotated here too */
	u8 xor_mult[2];
	u8 reserved[2];
};
/* constants for flags field of RAID map */
#define RAID_MAP_ENCRYPTION_ENABLED 0x1

/* RAID layout map fetched from the controller; drives AIO address math */
struct raid_map {
	__le32 structure_size;		/* size of entire structure in bytes */
	__le32 volume_blk_size;		/* bytes / block in the volume */
	__le64 volume_blk_cnt;		/* logical blocks on the volume */
	u8 phys_blk_shift;		/* shift factor to convert between */
					/* units of logical blocks and */
					/* physical disk blocks */
	u8 parity_rotation_shift;	/* shift factor to convert between */
					/* units of logical stripes and */
					/* physical stripes */
	__le16 strip_size;		/* blocks used on each disk / stripe */
	__le64 disk_starting_blk;	/* first disk block used in volume */
	__le64 disk_blk_cnt;		/* disk blocks used by volume / disk */
	__le16 data_disks_per_row;	/* data disk entries / row in the map */
	__le16 metadata_disks_per_row;	/* mirror/parity disk entries / row */
					/* in the map */
	__le16 row_cnt;			/* rows in each layout map */
	__le16 layout_map_count;	/* layout maps (1 map per */
					/* mirror parity group) */
	__le16 flags;			/* RAID_MAP_ENCRYPTION_ENABLED etc. */
	__le16 data_encryption_key_index;
	u8 reserved[16];
	struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
};

#pragma pack()
/* 8-byte all-zero LUN address of the controller device itself */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"

/* driver-private per-device state (one per discovered SCSI device) */
struct pqi_scsi_dev {
	int devtype;			/* as reported by INQUIRY command */
	u8 device_type;			/* as reported by */
					/* BMIC_IDENTIFY_PHYSICAL_DEVICE */
					/* only valid for devtype = TYPE_DISK */
	int bus;			/* PQI_*_BUS pseudo-bus number */
	int target;
	int lun;
	u8 scsi3addr[8];
	__be64 wwid;
	u8 volume_id[16];
	u8 is_physical_device : 1;
	u8 target_lun_valid : 1;
	u8 expose_device : 1;
	u8 no_uld_attach : 1;
	u8 aio_enabled : 1;		/* only valid for physical disks */
	/* rescan bookkeeping flags */
	u8 device_gone : 1;
	u8 new_device : 1;
	u8 keep_device : 1;
	u8 volume_offline : 1;
	u8 vendor[8];			/* bytes 8-15 of inquiry data */
	u8 model[16];			/* bytes 16-31 of inquiry data */
	u64 sas_address;
	u8 raid_level;
	u16 queue_depth;		/* max. queue_depth for this device */
	u16 advertised_queue_depth;
	u32 aio_handle;
	u8 volume_status;		/* CISS_LV_* */
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_configured;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_enabled_pending;
	int offload_to_mirror;		/* Send next I/O accelerator RAID */
					/* offload request to mirror drive. */
	struct raid_map *raid_map;	/* I/O accelerator RAID map */

	struct pqi_sas_port *sas_port;
	struct scsi_device *sdev;

	struct list_head scsi_device_list_entry;
	struct list_head new_device_list_entry;
	struct list_head add_list_entry;
	struct list_head delete_list_entry;
};
/* VPD inquiry pages */
#define SCSI_VPD_SUPPORTED_PAGES 0x0		/* standard page */
#define SCSI_VPD_DEVICE_ID 0x83			/* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1	/* vendor-specific page */
#define CISS_VPD_LV_OFFLOAD_STATUS 0xc2		/* vendor-specific page */
#define CISS_VPD_LV_STATUS 0xc3			/* vendor-specific page */

/* flag OR'd into a page code to request a VPD page */
#define VPD_PAGE (1 << 8)

#pragma pack(1)

/* structure for CISS_VPD_LV_STATUS */
struct ciss_vpd_logical_volume_status {
	u8 peripheral_info;
	u8 page_code;
	u8 reserved;
	u8 page_length;
	u8 volume_status;	/* CISS_LV_* below */
	u8 reserved2[3];
	__be32 flags;		/* CISS_LV_FLAGS_* below */
};

#pragma pack()

/* constants for volume_status field of ciss_vpd_logical_volume_status */
#define CISS_LV_OK 0
#define CISS_LV_FAILED 1
#define CISS_LV_NOT_CONFIGURED 2
#define CISS_LV_DEGRADED 3
#define CISS_LV_READY_FOR_RECOVERY 4
#define CISS_LV_UNDERGOING_RECOVERY 5
#define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED 6
#define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM 7
#define CISS_LV_HARDWARE_OVERHEATING 8
#define CISS_LV_HARDWARE_HAS_OVERHEATED 9
#define CISS_LV_UNDERGOING_EXPANSION 10
#define CISS_LV_NOT_AVAILABLE 11
#define CISS_LV_QUEUED_FOR_EXPANSION 12
#define CISS_LV_DISABLED_SCSI_ID_CONFLICT 13
#define CISS_LV_EJECTED 14
#define CISS_LV_UNDERGOING_ERASE 15
/* state 16 not used */
#define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD 17
#define CISS_LV_UNDERGOING_RPI 18
#define CISS_LV_PENDING_RPI 19
#define CISS_LV_ENCRYPTED_NO_KEY 20
/* state 21 not used */
#define CISS_LV_UNDERGOING_ENCRYPTION 22
#define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING 23
#define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 24
#define CISS_LV_PENDING_ENCRYPTION 25
#define CISS_LV_PENDING_ENCRYPTION_REKEYING 26
#define CISS_LV_NOT_SUPPORTED 27
#define CISS_LV_STATUS_UNAVAILABLE 255

/* constants for flags field of ciss_vpd_logical_volume_status */
#define CISS_LV_FLAGS_NO_HOST_IO 0x1	/* volume not available for */
					/* host I/O */
/* for SAS hosts and SAS expanders */
struct pqi_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;	/* list of struct pqi_sas_port */
};

struct pqi_sas_port {
	struct list_head port_list_entry;	/* on pqi_sas_node.port_list_head */
	u64 sas_address;
	struct sas_port *port;
	int next_phy_index;
	struct list_head phy_list_head;		/* list of struct pqi_sas_phy */
	struct pqi_sas_node *parent_node;
	struct sas_rphy *rphy;
};

struct pqi_sas_phy {
	struct list_head phy_list_entry;	/* on pqi_sas_port.phy_list_head */
	struct sas_phy *phy;
	struct pqi_sas_port *parent_port;
	bool added_to_port;
};

/* driver-side tracking for one in-flight request */
struct pqi_io_request {
	atomic_t refcount;
	u16 index;		/* slot number; doubles as request_id base */
	void (*io_complete_callback)(struct pqi_io_request *io_request,
		void *context);
	void *context;		/* passed to io_complete_callback */
	int status;
	struct scsi_cmnd *scmd;
	void *error_info;	/* pqi_raid_error_info / pqi_aio_error_info */
	struct pqi_sg_descriptor *sg_chain_buffer;	/* for SG lists that */
							/* overflow the IU */
	dma_addr_t sg_chain_buffer_dma_handle;
	void *iu;
	struct list_head request_list_entry;
};
  766. /* for indexing into the pending_events[] field of struct pqi_ctrl_info */
  767. #define PQI_EVENT_HEARTBEAT 0
  768. #define PQI_EVENT_HOTPLUG 1
  769. #define PQI_EVENT_HARDWARE 2
  770. #define PQI_EVENT_PHYSICAL_DEVICE 3
  771. #define PQI_EVENT_LOGICAL_DEVICE 4
  772. #define PQI_EVENT_AIO_STATE_CHANGE 5
  773. #define PQI_EVENT_AIO_CONFIG_CHANGE 6
  774. #define PQI_NUM_SUPPORTED_EVENTS 7
/* one asynchronous event received from the controller */
struct pqi_event {
	bool pending;			/* true while the event awaits processing */
	u8 event_type;			/* NOTE(review): looks like a PQI_EVENT_* index -- confirm */
	__le16 event_id;
	__le32 additional_event_id;
};
/*
 * I/O slots withheld from the SCSI midlayer so internal requests
 * (LUN resets, event acknowledgements, synchronous requests) can
 * always be issued.
 */
#define PQI_RESERVED_IO_SLOTS_LUN_RESET			1
#define PQI_RESERVED_IO_SLOTS_EVENT_ACK			PQI_NUM_SUPPORTED_EVENTS
#define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS	3
#define PQI_RESERVED_IO_SLOTS \
	(PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
	PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
/*
 * Per-controller state: one instance per PQI controller managed by
 * the driver.
 */
struct pqi_ctrl_info {
	unsigned int ctrl_id;
	struct pci_dev *pci_dev;
	char firmware_version[11];

	/* PCI BAR mappings */
	void __iomem *iomem_base;
	struct pqi_ctrl_registers __iomem *registers;
	struct pqi_device_registers __iomem *pqi_registers;

	/* capabilities reported by the controller */
	u32 max_sg_entries;
	u32 config_table_offset;
	u32 config_table_length;
	u16 max_inbound_queues;
	u16 max_elements_per_iq;
	u16 max_iq_element_length;
	u16 max_outbound_queues;
	u16 max_elements_per_oq;
	u16 max_oq_element_length;
	u32 max_transfer_size;
	u32 max_outstanding_requests;
	u32 max_io_slots;

	/* limits exposed to the SCSI midlayer */
	unsigned int scsi_ml_can_queue;
	unsigned short sg_tablesize;
	unsigned int max_sectors;

	/* error buffer shared with the controller */
	u32 error_buffer_length;
	void *error_buffer;
	dma_addr_t error_buffer_dma_handle;
	size_t sg_chain_buffer_length;

	/* negotiated queue configuration */
	unsigned int num_queue_groups;
	unsigned int num_active_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;
	u16 max_inbound_iu_length_per_firmware;
	u16 max_inbound_iu_length;
	unsigned int max_sg_per_iu;

	/* DMA memory backing the admin and I/O queues */
	void *admin_queue_memory_base;
	u32 admin_queue_memory_length;
	dma_addr_t admin_queue_memory_base_dma_handle;
	void *queue_memory_base;
	u32 queue_memory_length;
	dma_addr_t queue_memory_base_dma_handle;
	struct pqi_admin_queues admin_queues;
	struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
	struct pqi_event_queue event_queue;

	/* MSI-X interrupt state */
	int max_msix_vectors;
	int num_msix_vectors_enabled;
	int num_msix_vectors_initialized;
	u32 msix_vectors[PQI_MAX_MSIX_VECTORS];
	void *intr_data[PQI_MAX_MSIX_VECTORS];
	int event_irq;

	struct Scsi_Host *scsi_host;
	struct mutex scan_mutex;

	/* single-bit status flags */
	u8 inbound_spanning_supported : 1;
	u8 outbound_spanning_supported : 1;
	u8 pqi_mode_enabled : 1;
	u8 controller_online : 1;
	u8 heartbeat_timer_started : 1;

	/* attached SCSI devices */
	struct list_head scsi_device_list;
	spinlock_t scsi_device_list_lock;	/* presumably protects scsi_device_list -- confirm */

	/* deferred work */
	struct delayed_work rescan_work;
	struct delayed_work update_time_work;

	/* SAS transport objects */
	struct pqi_sas_node *sas_host;
	u64 sas_address;

	/* pool of preallocated I/O request structures */
	struct pqi_io_request *io_request_pool;
	u16 next_io_request_slot;

	/* asynchronous event handling */
	struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
	struct work_struct event_work;

	/* interrupt counters used for heartbeat/liveness monitoring */
	atomic_t num_interrupts;
	int previous_num_interrupts;
	unsigned int num_heartbeats_requested;
	struct timer_list heartbeat_timer;

	/* throttle internal synchronous requests and LUN resets */
	struct semaphore sync_request_sem;
	struct semaphore lun_reset_sem;
};
/* operating mode of the controller interface */
enum pqi_ctrl_mode {
	UNKNOWN,
	PQI_MODE
};
/*
 * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
 */
#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH	27

/* 0 = no limit */
#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH	0
/* CISS commands */
#define CISS_READ		0xc0
#define CISS_REPORT_LOG		0xc2	/* Report Logical LUNs */
#define CISS_REPORT_PHYS	0xc3	/* Report Physical LUNs */
#define CISS_GET_RAID_MAP	0xc8

/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
#define CISS_REPORT_LOG_EXTENDED	0x1
#define CISS_REPORT_PHYS_EXTENDED	0x2
/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER		0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE		0x15
#define BMIC_READ				0x26
#define BMIC_WRITE				0x27
#define BMIC_SENSE_CONTROLLER_PARAMETERS	0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION	0x66
#define BMIC_WRITE_HOST_WELLNESS		0xa5
#define BMIC_CACHE_FLUSH			0xc2

#define SA_CACHE_FLUSH	0x01
/* helpers for decoding fields of a CISS 8-byte LUN address */
#define MASKED_DEVICE(lunid)		((lunid)[3] & 0xc0)
#define CISS_GET_BUS(lunid)		((lunid)[7] & 0x3f)
#define CISS_GET_LEVEL_2_TARGET(lunid)	((lunid)[6])
/* flatten (bus, target) into a single drive number; buses are 1-based */
#define CISS_GET_DRIVE_NUMBER(lunid) \
	(((CISS_GET_BUS((lunid)) - 1) << 8) + \
	CISS_GET_LEVEL_2_TARGET((lunid)))

/* sentinel timeout value meaning "no timeout" */
#define NO_TIMEOUT ((unsigned long) -1)
#pragma pack(1)

/*
 * Data returned for BMIC_IDENTIFY_CONTROLLER.  Packed: the layout is
 * fixed by the controller firmware and must not be changed.
 */
struct bmic_identify_controller {
	u8 configured_logical_drive_count;
	__le32 configuration_signature;
	u8 firmware_version[4];
	u8 reserved[145];
	__le16 extended_logical_unit_count;
	u8 reserved1[34];
	__le16 firmware_build_number;
	u8 reserved2[100];
	u8 controller_mode;
	u8 reserved3[32];
};
/*
 * Data returned for BMIC_IDENTIFY_PHYSICAL_DEVICE.  Packed: the layout
 * is fixed by the controller firmware and must not be changed.
 */
struct bmic_identify_physical_device {
	u8 scsi_bus;			/* SCSI Bus number on controller */
	u8 scsi_id;			/* SCSI ID on this bus */
	__le16 block_size;		/* sector size in bytes */
	__le32 total_blocks;		/* number of sectors on drive */
	__le32 reserved_blocks;		/* controller reserved (RIS) */
	u8 model[40];			/* Physical Drive Model */
	u8 serial_number[40];		/* Drive Serial Number */
	u8 firmware_revision[8];	/* drive firmware revision */
	u8 scsi_inquiry_bits;		/* inquiry byte 7 bits */
	u8 compaq_drive_stamp;		/* 0 means drive not stamped */
	u8 last_failure_reason;
	u8 flags;
	u8 more_flags;
	u8 scsi_lun;			/* SCSI LUN for phys drive */
	u8 yet_more_flags;
	u8 even_more_flags;
	__le32 spi_speed_rules;
	u8 phys_connector[2];		/* connector number on controller */
	u8 phys_box_on_bus;		/* phys enclosure this drive resides */
	u8 phys_bay_in_box;		/* phys drv bay this drive resides */
	__le32 rpm;			/* drive rotational speed in RPM */
	u8 device_type;			/* type of drive */
	u8 sata_version;		/* only valid when device_type = */
					/* BMIC_DEVICE_TYPE_SATA */
	__le64 big_total_block_count;
	__le64 ris_starting_lba;
	__le32 ris_size;
	u8 wwid[20];
	u8 controller_phy_map[32];
	__le16 phy_count;
	u8 phy_connected_dev_type[256];
	u8 phy_to_drive_bay_num[256];
	__le16 phy_to_attached_dev_index[256];
	u8 box_index;
	u8 reserved;
	__le16 extra_physical_drive_flags;
	u8 negotiated_link_rate[256];
	u8 phy_to_phy_map[256];
	u8 redundant_path_present_map;
	u8 redundant_path_failure_map;
	u8 active_path_number;
	__le16 alternate_paths_phys_connector[8];
	u8 alternate_paths_phys_box_on_port[8];
	u8 multi_lun_device_lun_count;
	u8 minimum_good_fw_revision[8];
	u8 unique_inquiry_bytes[20];
	u8 current_temperature_degreesC;
	u8 temperature_threshold_degreesC;
	u8 max_temperature_degreesC;
	u8 logical_blocks_per_phys_block_exp;
	__le16 current_queue_depth_limit;
	u8 switch_name[10];
	__le16 switch_port;
	u8 alternate_paths_switch_name[40];
	u8 alternate_paths_switch_port[8];
	__le16 power_on_hours;
	__le16 percent_endurance_used;
	u8 drive_authentication;
	u8 smart_carrier_authentication;
	u8 smart_carrier_app_fw_version;
	u8 smart_carrier_bootloader_fw_version;
	u8 encryption_key_name[64];
	__le32 misc_drive_flags;
	__le16 dek_index;
	u8 padding[112];
};

#pragma pack()
/* SAS transport layer hooks (defined elsewhere in the driver) */
int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
	struct pqi_scsi_dev *device);
void pqi_remove_sas_device(struct pqi_scsi_dev *device);
struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
	struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
extern struct sas_function_template pqi_sas_transport_functions;
  983. #if !defined(readq)
  984. #define readq readq
  985. static inline u64 readq(const volatile void __iomem *addr)
  986. {
  987. u32 lower32;
  988. u32 upper32;
  989. lower32 = readl(addr);
  990. upper32 = readl(addr + 4);
  991. return ((u64)upper32 << 32) | lower32;
  992. }
  993. #endif
  994. #if !defined(writeq)
  995. #define writeq writeq
  996. static inline void writeq(u64 value, volatile void __iomem *addr)
  997. {
  998. u32 lower32;
  999. u32 upper32;
  1000. lower32 = lower_32_bits(value);
  1001. upper32 = upper_32_bits(value);
  1002. writel(lower32, addr);
  1003. writel(upper32, addr + 4);
  1004. }
  1005. #endif
  1006. #endif /* _SMARTPQI_H */