smartpqi.h 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222
/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016-2017 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
  18. #include <linux/io-64-nonatomic-lo-hi.h>
  19. #if !defined(_SMARTPQI_H)
  20. #define _SMARTPQI_H
  21. #pragma pack(1)
  22. #define PQI_DEVICE_SIGNATURE "PQI DREG"
  23. /* This structure is defined by the PQI specification. */
  24. struct pqi_device_registers {
  25. __le64 signature;
  26. u8 function_and_status_code;
  27. u8 reserved[7];
  28. u8 max_admin_iq_elements;
  29. u8 max_admin_oq_elements;
  30. u8 admin_iq_element_length; /* in 16-byte units */
  31. u8 admin_oq_element_length; /* in 16-byte units */
  32. __le16 max_reset_timeout; /* in 100-millisecond units */
  33. u8 reserved1[2];
  34. __le32 legacy_intx_status;
  35. __le32 legacy_intx_mask_set;
  36. __le32 legacy_intx_mask_clear;
  37. u8 reserved2[28];
  38. __le32 device_status;
  39. u8 reserved3[4];
  40. __le64 admin_iq_pi_offset;
  41. __le64 admin_oq_ci_offset;
  42. __le64 admin_iq_element_array_addr;
  43. __le64 admin_oq_element_array_addr;
  44. __le64 admin_iq_ci_addr;
  45. __le64 admin_oq_pi_addr;
  46. u8 admin_iq_num_elements;
  47. u8 admin_oq_num_elements;
  48. __le16 admin_queue_int_msg_num;
  49. u8 reserved4[4];
  50. __le32 device_error;
  51. u8 reserved5[4];
  52. __le64 error_details;
  53. __le32 device_reset;
  54. __le32 power_action;
  55. u8 reserved6[104];
  56. };
/*
 * controller registers
 *
 * These are defined by the Microsemi implementation.
 *
 * Some registers (those named sis_*) are only used when in
 * legacy SIS mode before we transition the controller into
 * PQI mode.  There are a number of other SIS mode registers,
 * but we don't use them, so only the SIS registers that we
 * care about are defined here.  The offsets mentioned in the
 * comments are the offsets from the PCIe BAR 0.
 */
  69. struct pqi_ctrl_registers {
  70. u8 reserved[0x20];
  71. __le32 sis_host_to_ctrl_doorbell; /* 20h */
  72. u8 reserved1[0x34 - (0x20 + sizeof(__le32))];
  73. __le32 sis_interrupt_mask; /* 34h */
  74. u8 reserved2[0x9c - (0x34 + sizeof(__le32))];
  75. __le32 sis_ctrl_to_host_doorbell; /* 9Ch */
  76. u8 reserved3[0xa0 - (0x9c + sizeof(__le32))];
  77. __le32 sis_ctrl_to_host_doorbell_clear; /* A0h */
  78. u8 reserved4[0xb0 - (0xa0 + sizeof(__le32))];
  79. __le32 sis_driver_scratch; /* B0h */
  80. u8 reserved5[0xbc - (0xb0 + sizeof(__le32))];
  81. __le32 sis_firmware_status; /* BCh */
  82. u8 reserved6[0x1000 - (0xbc + sizeof(__le32))];
  83. __le32 sis_mailbox[8]; /* 1000h */
  84. u8 reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
  85. /*
  86. * The PQI spec states that the PQI registers should be at
  87. * offset 0 from the PCIe BAR 0. However, we can't map
  88. * them at offset 0 because that would break compatibility
  89. * with the SIS registers. So we map them at offset 4000h.
  90. */
  91. struct pqi_device_registers pqi_registers; /* 4000h */
  92. };
  93. #define PQI_DEVICE_REGISTERS_OFFSET 0x4000
  94. enum pqi_io_path {
  95. RAID_PATH = 0,
  96. AIO_PATH = 1
  97. };
  98. enum pqi_irq_mode {
  99. IRQ_MODE_NONE,
  100. IRQ_MODE_INTX,
  101. IRQ_MODE_MSIX
  102. };
  103. struct pqi_sg_descriptor {
  104. __le64 address;
  105. __le32 length;
  106. __le32 flags;
  107. };
  108. /* manifest constants for the flags field of pqi_sg_descriptor */
  109. #define CISS_SG_LAST 0x40000000
  110. #define CISS_SG_CHAIN 0x80000000
  111. struct pqi_iu_header {
  112. u8 iu_type;
  113. u8 reserved;
  114. __le16 iu_length; /* in bytes - does not include the length */
  115. /* of this header */
  116. __le16 response_queue_id; /* specifies the OQ where the */
  117. /* response IU is to be delivered */
  118. u8 work_area[2]; /* reserved for driver use */
  119. };
  120. /*
  121. * According to the PQI spec, the IU header is only the first 4 bytes of our
  122. * pqi_iu_header structure.
  123. */
  124. #define PQI_REQUEST_HEADER_LENGTH 4
  125. struct pqi_general_admin_request {
  126. struct pqi_iu_header header;
  127. __le16 request_id;
  128. u8 function_code;
  129. union {
  130. struct {
  131. u8 reserved[33];
  132. __le32 buffer_length;
  133. struct pqi_sg_descriptor sg_descriptor;
  134. } report_device_capability;
  135. struct {
  136. u8 reserved;
  137. __le16 queue_id;
  138. u8 reserved1[2];
  139. __le64 element_array_addr;
  140. __le64 ci_addr;
  141. __le16 num_elements;
  142. __le16 element_length;
  143. u8 queue_protocol;
  144. u8 reserved2[23];
  145. __le32 vendor_specific;
  146. } create_operational_iq;
  147. struct {
  148. u8 reserved;
  149. __le16 queue_id;
  150. u8 reserved1[2];
  151. __le64 element_array_addr;
  152. __le64 pi_addr;
  153. __le16 num_elements;
  154. __le16 element_length;
  155. u8 queue_protocol;
  156. u8 reserved2[3];
  157. __le16 int_msg_num;
  158. __le16 coalescing_count;
  159. __le32 min_coalescing_time;
  160. __le32 max_coalescing_time;
  161. u8 reserved3[8];
  162. __le32 vendor_specific;
  163. } create_operational_oq;
  164. struct {
  165. u8 reserved;
  166. __le16 queue_id;
  167. u8 reserved1[50];
  168. } delete_operational_queue;
  169. struct {
  170. u8 reserved;
  171. __le16 queue_id;
  172. u8 reserved1[46];
  173. __le32 vendor_specific;
  174. } change_operational_iq_properties;
  175. } data;
  176. };
  177. struct pqi_general_admin_response {
  178. struct pqi_iu_header header;
  179. __le16 request_id;
  180. u8 function_code;
  181. u8 status;
  182. union {
  183. struct {
  184. u8 status_descriptor[4];
  185. __le64 iq_pi_offset;
  186. u8 reserved[40];
  187. } create_operational_iq;
  188. struct {
  189. u8 status_descriptor[4];
  190. __le64 oq_ci_offset;
  191. u8 reserved[40];
  192. } create_operational_oq;
  193. } data;
  194. };
  195. struct pqi_iu_layer_descriptor {
  196. u8 inbound_spanning_supported : 1;
  197. u8 reserved : 7;
  198. u8 reserved1[5];
  199. __le16 max_inbound_iu_length;
  200. u8 outbound_spanning_supported : 1;
  201. u8 reserved2 : 7;
  202. u8 reserved3[5];
  203. __le16 max_outbound_iu_length;
  204. };
  205. struct pqi_device_capability {
  206. __le16 data_length;
  207. u8 reserved[6];
  208. u8 iq_arbitration_priority_support_bitmask;
  209. u8 maximum_aw_a;
  210. u8 maximum_aw_b;
  211. u8 maximum_aw_c;
  212. u8 max_arbitration_burst : 3;
  213. u8 reserved1 : 4;
  214. u8 iqa : 1;
  215. u8 reserved2[2];
  216. u8 iq_freeze : 1;
  217. u8 reserved3 : 7;
  218. __le16 max_inbound_queues;
  219. __le16 max_elements_per_iq;
  220. u8 reserved4[4];
  221. __le16 max_iq_element_length;
  222. __le16 min_iq_element_length;
  223. u8 reserved5[2];
  224. __le16 max_outbound_queues;
  225. __le16 max_elements_per_oq;
  226. __le16 intr_coalescing_time_granularity;
  227. __le16 max_oq_element_length;
  228. __le16 min_oq_element_length;
  229. u8 reserved6[24];
  230. struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
  231. };
  232. #define PQI_MAX_EMBEDDED_SG_DESCRIPTORS 4
  233. struct pqi_raid_path_request {
  234. struct pqi_iu_header header;
  235. __le16 request_id;
  236. __le16 nexus_id;
  237. __le32 buffer_length;
  238. u8 lun_number[8];
  239. __le16 protocol_specific;
  240. u8 data_direction : 2;
  241. u8 partial : 1;
  242. u8 reserved1 : 4;
  243. u8 fence : 1;
  244. __le16 error_index;
  245. u8 reserved2;
  246. u8 task_attribute : 3;
  247. u8 command_priority : 4;
  248. u8 reserved3 : 1;
  249. u8 reserved4 : 2;
  250. u8 additional_cdb_bytes_usage : 3;
  251. u8 reserved5 : 3;
  252. u8 cdb[32];
  253. struct pqi_sg_descriptor
  254. sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
  255. };
  256. struct pqi_aio_path_request {
  257. struct pqi_iu_header header;
  258. __le16 request_id;
  259. u8 reserved1[2];
  260. __le32 nexus_id;
  261. __le32 buffer_length;
  262. u8 data_direction : 2;
  263. u8 partial : 1;
  264. u8 memory_type : 1;
  265. u8 fence : 1;
  266. u8 encryption_enable : 1;
  267. u8 reserved2 : 2;
  268. u8 task_attribute : 3;
  269. u8 command_priority : 4;
  270. u8 reserved3 : 1;
  271. __le16 data_encryption_key_index;
  272. __le32 encrypt_tweak_lower;
  273. __le32 encrypt_tweak_upper;
  274. u8 cdb[16];
  275. __le16 error_index;
  276. u8 num_sg_descriptors;
  277. u8 cdb_length;
  278. u8 lun_number[8];
  279. u8 reserved4[4];
  280. struct pqi_sg_descriptor
  281. sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
  282. };
  283. struct pqi_io_response {
  284. struct pqi_iu_header header;
  285. __le16 request_id;
  286. __le16 error_index;
  287. u8 reserved2[4];
  288. };
  289. struct pqi_general_management_request {
  290. struct pqi_iu_header header;
  291. __le16 request_id;
  292. union {
  293. struct {
  294. u8 reserved[2];
  295. __le32 buffer_length;
  296. struct pqi_sg_descriptor sg_descriptors[3];
  297. } report_event_configuration;
  298. struct {
  299. __le16 global_event_oq_id;
  300. __le32 buffer_length;
  301. struct pqi_sg_descriptor sg_descriptors[3];
  302. } set_event_configuration;
  303. } data;
  304. };
  305. struct pqi_event_descriptor {
  306. u8 event_type;
  307. u8 reserved;
  308. __le16 oq_id;
  309. };
  310. struct pqi_event_config {
  311. u8 reserved[2];
  312. u8 num_event_descriptors;
  313. u8 reserved1;
  314. struct pqi_event_descriptor descriptors[1];
  315. };
  316. #define PQI_MAX_EVENT_DESCRIPTORS 255
  317. struct pqi_event_response {
  318. struct pqi_iu_header header;
  319. u8 event_type;
  320. u8 reserved2 : 7;
  321. u8 request_acknowlege : 1;
  322. __le16 event_id;
  323. __le32 additional_event_id;
  324. u8 data[16];
  325. };
  326. struct pqi_event_acknowledge_request {
  327. struct pqi_iu_header header;
  328. u8 event_type;
  329. u8 reserved2;
  330. __le16 event_id;
  331. __le32 additional_event_id;
  332. };
  333. struct pqi_task_management_request {
  334. struct pqi_iu_header header;
  335. __le16 request_id;
  336. __le16 nexus_id;
  337. u8 reserved[4];
  338. u8 lun_number[8];
  339. __le16 protocol_specific;
  340. __le16 outbound_queue_id_to_manage;
  341. __le16 request_id_to_manage;
  342. u8 task_management_function;
  343. u8 reserved2 : 7;
  344. u8 fence : 1;
  345. };
  346. #define SOP_TASK_MANAGEMENT_LUN_RESET 0x8
  347. struct pqi_task_management_response {
  348. struct pqi_iu_header header;
  349. __le16 request_id;
  350. __le16 nexus_id;
  351. u8 additional_response_info[3];
  352. u8 response_code;
  353. };
  354. struct pqi_aio_error_info {
  355. u8 status;
  356. u8 service_response;
  357. u8 data_present;
  358. u8 reserved;
  359. __le32 residual_count;
  360. __le16 data_length;
  361. __le16 reserved1;
  362. u8 data[256];
  363. };
  364. struct pqi_raid_error_info {
  365. u8 data_in_result;
  366. u8 data_out_result;
  367. u8 reserved[3];
  368. u8 status;
  369. __le16 status_qualifier;
  370. __le16 sense_data_length;
  371. __le16 response_data_length;
  372. __le32 data_in_transferred;
  373. __le32 data_out_transferred;
  374. u8 data[256];
  375. };
  376. #define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13
  377. #define PQI_REQUEST_IU_RAID_PATH_IO 0x14
  378. #define PQI_REQUEST_IU_AIO_PATH_IO 0x15
  379. #define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
  380. #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
  381. #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
  382. #define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
  383. #define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
  384. #define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93
  385. #define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0
  386. #define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS 0xf0
  387. #define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS 0xf1
  388. #define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR 0xf2
  389. #define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3
  390. #define PQI_RESPONSE_IU_AIO_PATH_DISABLED 0xf4
  391. #define PQI_RESPONSE_IU_VENDOR_EVENT 0xf5
  392. #define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY 0x0
  393. #define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ 0x10
  394. #define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ 0x11
  395. #define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ 0x12
  396. #define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ 0x13
  397. #define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY 0x14
  398. #define PQI_GENERAL_ADMIN_STATUS_SUCCESS 0x0
  399. #define PQI_IQ_PROPERTY_IS_AIO_QUEUE 0x1
  400. #define PQI_GENERAL_ADMIN_IU_LENGTH 0x3c
  401. #define PQI_PROTOCOL_SOP 0x0
  402. #define PQI_DATA_IN_OUT_GOOD 0x0
  403. #define PQI_DATA_IN_OUT_UNDERFLOW 0x1
  404. #define PQI_DATA_IN_OUT_BUFFER_ERROR 0x40
  405. #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW 0x41
  406. #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA 0x42
  407. #define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE 0x43
  408. #define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR 0x60
  409. #define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT 0x61
  410. #define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED 0x62
  411. #define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED 0x63
  412. #define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED 0x64
  413. #define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST 0x65
  414. #define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION 0x66
  415. #define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED 0x67
  416. #define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ 0x6F
  417. #define PQI_DATA_IN_OUT_ERROR 0xf0
  418. #define PQI_DATA_IN_OUT_PROTOCOL_ERROR 0xf1
  419. #define PQI_DATA_IN_OUT_HARDWARE_ERROR 0xf2
  420. #define PQI_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3
  421. #define PQI_DATA_IN_OUT_ABORTED 0xf4
  422. #define PQI_DATA_IN_OUT_TIMEOUT 0xf5
  423. #define CISS_CMD_STATUS_SUCCESS 0x0
  424. #define CISS_CMD_STATUS_TARGET_STATUS 0x1
  425. #define CISS_CMD_STATUS_DATA_UNDERRUN 0x2
  426. #define CISS_CMD_STATUS_DATA_OVERRUN 0x3
  427. #define CISS_CMD_STATUS_INVALID 0x4
  428. #define CISS_CMD_STATUS_PROTOCOL_ERROR 0x5
  429. #define CISS_CMD_STATUS_HARDWARE_ERROR 0x6
  430. #define CISS_CMD_STATUS_CONNECTION_LOST 0x7
  431. #define CISS_CMD_STATUS_ABORTED 0x8
  432. #define CISS_CMD_STATUS_ABORT_FAILED 0x9
  433. #define CISS_CMD_STATUS_UNSOLICITED_ABORT 0xa
  434. #define CISS_CMD_STATUS_TIMEOUT 0xb
  435. #define CISS_CMD_STATUS_UNABORTABLE 0xc
  436. #define CISS_CMD_STATUS_TMF 0xd
  437. #define CISS_CMD_STATUS_AIO_DISABLED 0xe
  438. #define PQI_CMD_STATUS_ABORTED CISS_CMD_STATUS_ABORTED
  439. #define PQI_NUM_EVENT_QUEUE_ELEMENTS 32
  440. #define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response)
  441. #define PQI_EVENT_TYPE_HOTPLUG 0x1
  442. #define PQI_EVENT_TYPE_HARDWARE 0x2
  443. #define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4
  444. #define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
  445. #define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
  446. #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
  447. #pragma pack()
  448. #define PQI_ERROR_BUFFER_ELEMENT_LENGTH \
  449. sizeof(struct pqi_raid_error_info)
  450. /* these values are based on our implementation */
  451. #define PQI_ADMIN_IQ_NUM_ELEMENTS 8
  452. #define PQI_ADMIN_OQ_NUM_ELEMENTS 20
  453. #define PQI_ADMIN_IQ_ELEMENT_LENGTH 64
  454. #define PQI_ADMIN_OQ_ELEMENT_LENGTH 64
  455. #define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH 128
  456. #define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH 16
  457. #define PQI_MIN_MSIX_VECTORS 1
  458. #define PQI_MAX_MSIX_VECTORS 64
  459. /* these values are defined by the PQI spec */
  460. #define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE 255
  461. #define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535
  462. #define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT 64
  463. #define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT 16
  464. #define PQI_ADMIN_INDEX_ALIGNMENT 64
  465. #define PQI_OPERATIONAL_INDEX_ALIGNMENT 4
  466. #define PQI_MIN_OPERATIONAL_QUEUE_ID 1
  467. #define PQI_MAX_OPERATIONAL_QUEUE_ID 65535
  468. #define PQI_AIO_SERV_RESPONSE_COMPLETE 0
  469. #define PQI_AIO_SERV_RESPONSE_FAILURE 1
  470. #define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE 2
  471. #define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED 3
  472. #define PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4
  473. #define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5
  474. #define PQI_AIO_STATUS_IO_ERROR 0x1
  475. #define PQI_AIO_STATUS_IO_ABORTED 0x2
  476. #define PQI_AIO_STATUS_NO_PATH_TO_DEVICE 0x3
  477. #define PQI_AIO_STATUS_INVALID_DEVICE 0x4
  478. #define PQI_AIO_STATUS_AIO_PATH_DISABLED 0xe
  479. #define PQI_AIO_STATUS_UNDERRUN 0x51
  480. #define PQI_AIO_STATUS_OVERRUN 0x75
  481. typedef u32 pqi_index_t;
  482. /* SOP data direction flags */
  483. #define SOP_NO_DIRECTION_FLAG 0
  484. #define SOP_WRITE_FLAG 1 /* host writes data to Data-Out */
  485. /* buffer */
  486. #define SOP_READ_FLAG 2 /* host receives data from Data-In */
  487. /* buffer */
  488. #define SOP_BIDIRECTIONAL 3 /* data is transferred from the */
  489. /* Data-Out buffer and data is */
  490. /* transferred to the Data-In buffer */
  491. #define SOP_TASK_ATTRIBUTE_SIMPLE 0
  492. #define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1
  493. #define SOP_TASK_ATTRIBUTE_ORDERED 2
  494. #define SOP_TASK_ATTRIBUTE_ACA 4
  495. #define SOP_TMF_COMPLETE 0x0
  496. #define SOP_TMF_FUNCTION_SUCCEEDED 0x8
  497. /* additional CDB bytes usage field codes */
  498. #define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
  499. #define SOP_ADDITIONAL_CDB_BYTES_4 1 /* 20-byte CDB */
  500. #define SOP_ADDITIONAL_CDB_BYTES_8 2 /* 24-byte CDB */
  501. #define SOP_ADDITIONAL_CDB_BYTES_12 3 /* 28-byte CDB */
  502. #define SOP_ADDITIONAL_CDB_BYTES_16 4 /* 32-byte CDB */
  503. /*
  504. * The purpose of this structure is to obtain proper alignment of objects in
  505. * an admin queue pair.
  506. */
  507. struct pqi_admin_queues_aligned {
  508. __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
  509. u8 iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
  510. [PQI_ADMIN_IQ_NUM_ELEMENTS];
  511. __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
  512. u8 oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
  513. [PQI_ADMIN_OQ_NUM_ELEMENTS];
  514. __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
  515. __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
  516. };
  517. struct pqi_admin_queues {
  518. void *iq_element_array;
  519. void *oq_element_array;
  520. pqi_index_t *iq_ci;
  521. pqi_index_t __iomem *oq_pi;
  522. dma_addr_t iq_element_array_bus_addr;
  523. dma_addr_t oq_element_array_bus_addr;
  524. dma_addr_t iq_ci_bus_addr;
  525. dma_addr_t oq_pi_bus_addr;
  526. __le32 __iomem *iq_pi;
  527. pqi_index_t iq_pi_copy;
  528. __le32 __iomem *oq_ci;
  529. pqi_index_t oq_ci_copy;
  530. struct task_struct *task;
  531. u16 int_msg_num;
  532. };
  533. struct pqi_queue_group {
  534. struct pqi_ctrl_info *ctrl_info; /* backpointer */
  535. u16 iq_id[2];
  536. u16 oq_id;
  537. u16 int_msg_num;
  538. void *iq_element_array[2];
  539. void *oq_element_array;
  540. dma_addr_t iq_element_array_bus_addr[2];
  541. dma_addr_t oq_element_array_bus_addr;
  542. __le32 __iomem *iq_pi[2];
  543. pqi_index_t iq_pi_copy[2];
  544. pqi_index_t __iomem *iq_ci[2];
  545. pqi_index_t __iomem *oq_pi;
  546. dma_addr_t iq_ci_bus_addr[2];
  547. dma_addr_t oq_pi_bus_addr;
  548. __le32 __iomem *oq_ci;
  549. pqi_index_t oq_ci_copy;
  550. spinlock_t submit_lock[2]; /* protect submission queue */
  551. struct list_head request_list[2];
  552. };
  553. struct pqi_event_queue {
  554. u16 oq_id;
  555. u16 int_msg_num;
  556. void *oq_element_array;
  557. pqi_index_t __iomem *oq_pi;
  558. dma_addr_t oq_element_array_bus_addr;
  559. dma_addr_t oq_pi_bus_addr;
  560. __le32 __iomem *oq_ci;
  561. pqi_index_t oq_ci_copy;
  562. };
  563. #define PQI_DEFAULT_QUEUE_GROUP 0
  564. #define PQI_MAX_QUEUE_GROUPS PQI_MAX_MSIX_VECTORS
  565. struct pqi_encryption_info {
  566. u16 data_encryption_key_index;
  567. u32 encrypt_tweak_lower;
  568. u32 encrypt_tweak_upper;
  569. };
  570. #pragma pack(1)
  571. #define PQI_CONFIG_TABLE_SIGNATURE "CFGTABLE"
  572. #define PQI_CONFIG_TABLE_MAX_LENGTH ((u16)~0)
  573. /* configuration table section IDs */
  574. #define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO 0
  575. #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES 1
  576. #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2
  577. #define PQI_CONFIG_TABLE_SECTION_DEBUG 3
  578. #define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4
  579. struct pqi_config_table {
  580. u8 signature[8]; /* "CFGTABLE" */
  581. __le32 first_section_offset; /* offset in bytes from the base */
  582. /* address of this table to the */
  583. /* first section */
  584. };
  585. struct pqi_config_table_section_header {
  586. __le16 section_id; /* as defined by the */
  587. /* PQI_CONFIG_TABLE_SECTION_* */
  588. /* manifest constants above */
  589. __le16 next_section_offset; /* offset in bytes from base */
  590. /* address of the table of the */
  591. /* next section or 0 if last entry */
  592. };
  593. struct pqi_config_table_general_info {
  594. struct pqi_config_table_section_header header;
  595. __le32 section_length; /* size of this section in bytes */
  596. /* including the section header */
  597. __le32 max_outstanding_requests; /* max. outstanding */
  598. /* commands supported by */
  599. /* the controller */
  600. __le32 max_sg_size; /* max. transfer size of a single */
  601. /* command */
  602. __le32 max_sg_per_request; /* max. number of scatter-gather */
  603. /* entries supported in a single */
  604. /* command */
  605. };
  606. struct pqi_config_table_debug {
  607. struct pqi_config_table_section_header header;
  608. __le32 scratchpad;
  609. };
  610. struct pqi_config_table_heartbeat {
  611. struct pqi_config_table_section_header header;
  612. __le32 heartbeat_counter;
  613. };
  614. union pqi_reset_register {
  615. struct {
  616. u32 reset_type : 3;
  617. u32 reserved : 2;
  618. u32 reset_action : 3;
  619. u32 hold_in_pd1 : 1;
  620. u32 reserved2 : 23;
  621. } bits;
  622. u32 all_bits;
  623. };
  624. #define PQI_RESET_ACTION_RESET 0x1
  625. #define PQI_RESET_TYPE_NO_RESET 0x0
  626. #define PQI_RESET_TYPE_SOFT_RESET 0x1
  627. #define PQI_RESET_TYPE_FIRM_RESET 0x2
  628. #define PQI_RESET_TYPE_HARD_RESET 0x3
  629. #define PQI_RESET_ACTION_COMPLETED 0x2
  630. #define PQI_RESET_POLL_INTERVAL_MSECS 100
  631. #define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
  632. #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32
  633. #define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
  634. #define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
  635. #define RAID_MAP_MAX_ENTRIES 1024
  636. #define PQI_PHYSICAL_DEVICE_BUS 0
  637. #define PQI_RAID_VOLUME_BUS 1
  638. #define PQI_HBA_BUS 2
  639. #define PQI_EXTERNAL_RAID_VOLUME_BUS 3
  640. #define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS
  641. struct report_lun_header {
  642. __be32 list_length;
  643. u8 extended_response;
  644. u8 reserved[3];
  645. };
  646. struct report_log_lun_extended_entry {
  647. u8 lunid[8];
  648. u8 volume_id[16];
  649. };
  650. struct report_log_lun_extended {
  651. struct report_lun_header header;
  652. struct report_log_lun_extended_entry lun_entries[1];
  653. };
  654. struct report_phys_lun_extended_entry {
  655. u8 lunid[8];
  656. __be64 wwid;
  657. u8 device_type;
  658. u8 device_flags;
  659. u8 lun_count; /* number of LUNs in a multi-LUN device */
  660. u8 redundant_paths;
  661. u32 aio_handle;
  662. };
  663. /* for device_flags field of struct report_phys_lun_extended_entry */
  664. #define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8
  665. struct report_phys_lun_extended {
  666. struct report_lun_header header;
  667. struct report_phys_lun_extended_entry lun_entries[1];
  668. };
  669. struct raid_map_disk_data {
  670. u32 aio_handle;
  671. u8 xor_mult[2];
  672. u8 reserved[2];
  673. };
  674. /* constants for flags field of RAID map */
  675. #define RAID_MAP_ENCRYPTION_ENABLED 0x1
  676. struct raid_map {
  677. __le32 structure_size; /* size of entire structure in bytes */
  678. __le32 volume_blk_size; /* bytes / block in the volume */
  679. __le64 volume_blk_cnt; /* logical blocks on the volume */
  680. u8 phys_blk_shift; /* shift factor to convert between */
  681. /* units of logical blocks and */
  682. /* physical disk blocks */
  683. u8 parity_rotation_shift; /* shift factor to convert between */
  684. /* units of logical stripes and */
  685. /* physical stripes */
  686. __le16 strip_size; /* blocks used on each disk / stripe */
  687. __le64 disk_starting_blk; /* first disk block used in volume */
  688. __le64 disk_blk_cnt; /* disk blocks used by volume / disk */
  689. __le16 data_disks_per_row; /* data disk entries / row in the map */
  690. __le16 metadata_disks_per_row; /* mirror/parity disk entries / row */
  691. /* in the map */
  692. __le16 row_cnt; /* rows in each layout map */
  693. __le16 layout_map_count; /* layout maps (1 map per */
  694. /* mirror parity group) */
  695. __le16 flags;
  696. __le16 data_encryption_key_index;
  697. u8 reserved[16];
  698. struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
  699. };
  700. #pragma pack()
  701. #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
  702. struct pqi_scsi_dev {
  703. int devtype; /* as reported by INQUIRY commmand */
  704. u8 device_type; /* as reported by */
  705. /* BMIC_IDENTIFY_PHYSICAL_DEVICE */
  706. /* only valid for devtype = TYPE_DISK */
  707. int bus;
  708. int target;
  709. int lun;
  710. u8 scsi3addr[8];
  711. __be64 wwid;
  712. u8 volume_id[16];
  713. u8 is_physical_device : 1;
  714. u8 is_external_raid_device : 1;
  715. u8 target_lun_valid : 1;
  716. u8 device_gone : 1;
  717. u8 new_device : 1;
  718. u8 keep_device : 1;
  719. u8 volume_offline : 1;
  720. bool aio_enabled; /* only valid for physical disks */
  721. bool in_reset;
  722. bool device_offline;
  723. u8 vendor[8]; /* bytes 8-15 of inquiry data */
  724. u8 model[16]; /* bytes 16-31 of inquiry data */
  725. u64 sas_address;
  726. u8 raid_level;
  727. u16 queue_depth; /* max. queue_depth for this device */
  728. u16 advertised_queue_depth;
  729. u32 aio_handle;
  730. u8 volume_status;
  731. u8 active_path_index;
  732. u8 path_map;
  733. u8 bay;
  734. u8 box[8];
  735. u16 phys_connector[8];
  736. bool raid_bypass_configured; /* RAID bypass configured */
  737. bool raid_bypass_enabled; /* RAID bypass enabled */
  738. int offload_to_mirror; /* Send next RAID bypass request */
  739. /* to mirror drive. */
  740. struct raid_map *raid_map; /* RAID bypass map */
  741. struct pqi_sas_port *sas_port;
  742. struct scsi_device *sdev;
  743. struct list_head scsi_device_list_entry;
  744. struct list_head new_device_list_entry;
  745. struct list_head add_list_entry;
  746. struct list_head delete_list_entry;
  747. atomic_t scsi_cmds_outstanding;
  748. };
  749. /* VPD inquiry pages */
  750. #define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
  751. #define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
  752. #define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
  753. #define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
  754. #define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
  755. #define VPD_PAGE (1 << 8)
  756. #pragma pack(1)
  757. /* structure for CISS_VPD_LV_STATUS */
  758. struct ciss_vpd_logical_volume_status {
  759. u8 peripheral_info;
  760. u8 page_code;
  761. u8 reserved;
  762. u8 page_length;
  763. u8 volume_status;
  764. u8 reserved2[3];
  765. __be32 flags;
  766. };
  767. #pragma pack()
/*
 * constants for volume_status field of ciss_vpd_logical_volume_status
 *
 * These values are reported by the controller firmware; the numeric
 * values are part of the firmware interface and must not change.
 */
#define CISS_LV_OK 0
#define CISS_LV_FAILED 1
#define CISS_LV_NOT_CONFIGURED 2
#define CISS_LV_DEGRADED 3
#define CISS_LV_READY_FOR_RECOVERY 4
#define CISS_LV_UNDERGOING_RECOVERY 5
#define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED 6
#define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM 7
#define CISS_LV_HARDWARE_OVERHEATING 8
#define CISS_LV_HARDWARE_HAS_OVERHEATED 9
#define CISS_LV_UNDERGOING_EXPANSION 10
#define CISS_LV_NOT_AVAILABLE 11
#define CISS_LV_QUEUED_FOR_EXPANSION 12
#define CISS_LV_DISABLED_SCSI_ID_CONFLICT 13
#define CISS_LV_EJECTED 14
#define CISS_LV_UNDERGOING_ERASE 15
/* state 16 not used */
#define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD 17
#define CISS_LV_UNDERGOING_RPI 18
#define CISS_LV_PENDING_RPI 19
#define CISS_LV_ENCRYPTED_NO_KEY 20
/* state 21 not used */
#define CISS_LV_UNDERGOING_ENCRYPTION 22
#define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING 23
#define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 24
#define CISS_LV_PENDING_ENCRYPTION 25
#define CISS_LV_PENDING_ENCRYPTION_REKEYING 26
#define CISS_LV_NOT_SUPPORTED 27
#define CISS_LV_STATUS_UNAVAILABLE 255
/* constants for flags field of ciss_vpd_logical_volume_status */
#define CISS_LV_FLAGS_NO_HOST_IO 0x1 /* volume not available for */
/* host I/O */
/* for SAS hosts and SAS expanders */
struct pqi_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;	/* list of struct pqi_sas_port
						 * via port_list_entry */
};
/* one SAS port on a pqi_sas_node */
struct pqi_sas_port {
	struct list_head port_list_entry;	/* entry on parent_node's
						 * port_list_head */
	u64 sas_address;
	struct sas_port *port;		/* SAS transport class port object */
	int next_phy_index;		/* index to assign to the next phy
					 * added to this port */
	struct list_head phy_list_head;	/* list of struct pqi_sas_phy via
					 * phy_list_entry */
	struct pqi_sas_node *parent_node;
	struct sas_rphy *rphy;		/* remote phy exposed to the SAS
					 * transport layer */
};
/* one phy belonging to a pqi_sas_port */
struct pqi_sas_phy {
	struct list_head phy_list_entry;	/* entry on parent_port's
						 * phy_list_head */
	struct sas_phy *phy;		/* SAS transport class phy object */
	struct pqi_sas_port *parent_port;
	bool added_to_port;		/* true once registered with the
					 * transport port */
};
/*
 * Per-request bookkeeping for an in-flight I/O.  One of these is
 * allocated per request slot (see pqi_ctrl_info.io_request_pool).
 */
struct pqi_io_request {
	atomic_t refcount;
	u16 index;		/* slot index within the request pool */
	void (*io_complete_callback)(struct pqi_io_request *io_request,
		void *context);	/* invoked on completion with 'context' */
	void *context;
	u8 raid_bypass : 1;	/* request was sent via the RAID bypass
				 * (AIO) path */
	int status;
	struct pqi_queue_group *queue_group;	/* queues this request was
						 * submitted on */
	struct scsi_cmnd *scmd;	/* associated SCSI command, if any */
	void *error_info;	/* controller-supplied error information */
	struct pqi_sg_descriptor *sg_chain_buffer;	/* overflow SG list */
	dma_addr_t sg_chain_buffer_dma_handle;
	void *iu;		/* the information unit sent to the device */
	struct list_head request_list_entry;
};
#define PQI_NUM_SUPPORTED_EVENTS 6

/* cached copy of a controller event, filled in from the event queue */
struct pqi_event {
	bool pending;		/* event received but not yet processed */
	u8 event_type;
	__le16 event_id;
	__le32 additional_event_id;
};
/*
 * Number of I/O slots held back from the SCSI midlayer so that internal
 * requests (LUN resets, event acknowledgements, synchronous requests)
 * can always be allocated.
 */
#define PQI_RESERVED_IO_SLOTS_LUN_RESET 1
#define PQI_RESERVED_IO_SLOTS_EVENT_ACK PQI_NUM_SUPPORTED_EVENTS
#define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS 3
#define PQI_RESERVED_IO_SLOTS \
	(PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
	PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
/* top-level per-controller state for the driver */
struct pqi_ctrl_info {
	unsigned int ctrl_id;		/* driver-assigned controller number */
	struct pci_dev *pci_dev;
	char firmware_version[11];
	void __iomem *iomem_base;	/* mapped controller BAR */
	struct pqi_ctrl_registers __iomem *registers;
	struct pqi_device_registers __iomem *pqi_registers;

	/* capabilities/limits reported by the controller firmware */
	u32 max_sg_entries;
	u32 config_table_offset;
	u32 config_table_length;
	u16 max_inbound_queues;
	u16 max_elements_per_iq;
	u16 max_iq_element_length;
	u16 max_outbound_queues;
	u16 max_elements_per_oq;
	u16 max_oq_element_length;
	u32 max_transfer_size;
	u32 max_outstanding_requests;
	u32 max_io_slots;

	/* values advertised to the SCSI midlayer */
	unsigned int scsi_ml_can_queue;
	unsigned short sg_tablesize;
	unsigned int max_sectors;

	/* DMA buffer for controller-reported error information */
	u32 error_buffer_length;
	void *error_buffer;
	dma_addr_t error_buffer_dma_handle;
	size_t sg_chain_buffer_length;

	/* negotiated queue configuration */
	unsigned int num_queue_groups;
	u16 max_hw_queue_index;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;
	u16 max_inbound_iu_length_per_firmware;
	u16 max_inbound_iu_length;
	unsigned int max_sg_per_iu;

	/* DMA memory backing the admin and operational queues */
	void *admin_queue_memory_base;
	u32 admin_queue_memory_length;
	dma_addr_t admin_queue_memory_base_dma_handle;
	void *queue_memory_base;
	u32 queue_memory_length;
	dma_addr_t queue_memory_base_dma_handle;
	struct pqi_admin_queues admin_queues;
	struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
	struct pqi_event_queue event_queue;

	/* interrupt configuration */
	enum pqi_irq_mode irq_mode;
	int max_msix_vectors;
	int num_msix_vectors_enabled;
	int num_msix_vectors_initialized;
	int event_irq;			/* vector used for event notification */

	struct Scsi_Host *scsi_host;

	struct mutex scan_mutex;	/* serializes device rescans */
	struct mutex lun_reset_mutex;	/* serializes LUN resets */
	bool controller_online;		/* cleared when ctrl is taken offline */
	bool block_requests;		/* new requests are held off while set */
	u8 inbound_spanning_supported : 1;
	u8 outbound_spanning_supported : 1;
	u8 pqi_mode_enabled : 1;
	u8 pqi_reset_quiesce_supported : 1;

	/* attached devices, protected by scsi_device_list_lock */
	struct list_head scsi_device_list;
	spinlock_t scsi_device_list_lock;

	struct delayed_work rescan_work;
	struct delayed_work update_time_work;

	struct pqi_sas_node *sas_host;
	u64 sas_address;

	/* pool of max_io_slots pqi_io_request structures */
	struct pqi_io_request *io_request_pool;
	u16 next_io_request_slot;	/* hint for next free-slot search */

	struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS];
	struct work_struct event_work;

	/* heartbeat/health monitoring */
	atomic_t num_interrupts;
	int previous_num_interrupts;
	u32 previous_heartbeat_count;
	__le32 __iomem *heartbeat_counter;	/* counter in ctrl registers */
	struct timer_list heartbeat_timer;
	struct work_struct ctrl_offline_work;

	struct semaphore sync_request_sem;	/* limits concurrent
						 * synchronous requests */
	atomic_t num_busy_threads;
	atomic_t num_blocked_threads;
	wait_queue_head_t block_requests_wait;	/* waiters while
						 * block_requests is set */

	/* RAID bypass requests queued for retry via the RAID path */
	struct list_head raid_bypass_retry_list;
	spinlock_t raid_bypass_retry_list_lock;
	struct work_struct raid_bypass_retry_work;
};
/* operating mode of the controller: legacy SIS or native PQI */
enum pqi_ctrl_mode {
	SIS_MODE = 0,
	PQI_MODE
};
  934. /*
  935. * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
  936. */
  937. #define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27
/* CISS commands (opcodes defined by the controller firmware) */
#define CISS_READ 0xc0
#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
#define CISS_GET_RAID_MAP 0xc8

/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
#define CISS_REPORT_LOG_EXTENDED 0x1
#define CISS_REPORT_PHYS_EXTENDED 0x2

/* BMIC commands (sub-commands carried by CISS_READ/CISS_WRITE) */
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
#define BMIC_WRITE_HOST_WELLNESS 0xa5
#define BMIC_FLUSH_CACHE 0xc2

#define SA_FLUSH_CACHE 0x1

/*
 * Helpers to decode the 8-byte CISS LUN address ("lunid").
 * NOTE(review): byte/bit positions come from the CISS addressing
 * convention — verify against the firmware spec before changing.
 */
#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xc0)
#define CISS_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3f)
#define CISS_GET_LEVEL_2_TARGET(lunid) ((lunid)[6])
#define CISS_GET_DRIVE_NUMBER(lunid) \
	(((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
	CISS_GET_LEVEL_2_TARGET((lunid)))

/* sentinel meaning "wait forever" for timeout parameters */
#define NO_TIMEOUT ((unsigned long) -1)
#pragma pack(1)

/*
 * Reply buffer for BMIC_IDENTIFY_CONTROLLER.  Packed: the reserved
 * arrays position the interesting fields at their fixed firmware
 * offsets, so nothing here may be reordered or resized.
 */
struct bmic_identify_controller {
	u8 configured_logical_drive_count;
	__le32 configuration_signature;
	u8 firmware_version[4];
	u8 reserved[145];
	__le16 extended_logical_unit_count;
	u8 reserved1[34];
	__le16 firmware_build_number;
	u8 reserved2[100];
	u8 controller_mode;
	u8 reserved3[32];
};
/*
 * Reply buffer for BMIC_IDENTIFY_PHYSICAL_DEVICE.  Packed firmware
 * layout; the trailing padding field keeps the total size a multiple
 * of 512 bytes.  Do not reorder or resize fields.
 */
struct bmic_identify_physical_device {
	u8 scsi_bus;		/* SCSI Bus number on controller */
	u8 scsi_id;		/* SCSI ID on this bus */
	__le16 block_size;	/* sector size in bytes */
	__le32 total_blocks;	/* number for sectors on drive */
	__le32 reserved_blocks;	/* controller reserved (RIS) */
	u8 model[40];		/* Physical Drive Model */
	u8 serial_number[40];	/* Drive Serial Number */
	u8 firmware_revision[8];	/* drive firmware revision */
	u8 scsi_inquiry_bits;	/* inquiry byte 7 bits */
	u8 compaq_drive_stamp;	/* 0 means drive not stamped */
	u8 last_failure_reason;
	u8 flags;
	u8 more_flags;
	u8 scsi_lun;		/* SCSI LUN for phys drive */
	u8 yet_more_flags;
	u8 even_more_flags;
	__le32 spi_speed_rules;
	u8 phys_connector[2];	/* connector number on controller */
	u8 phys_box_on_bus;	/* phys enclosure this drive resides */
	u8 phys_bay_in_box;	/* phys drv bay this drive resides */
	__le32 rpm;		/* drive rotational speed in RPM */
	u8 device_type;		/* type of drive */
	u8 sata_version;	/* only valid when device_type = */
				/* BMIC_DEVICE_TYPE_SATA */
	__le64 big_total_block_count;
	__le64 ris_starting_lba;
	__le32 ris_size;
	u8 wwid[20];
	u8 controller_phy_map[32];
	__le16 phy_count;
	u8 phy_connected_dev_type[256];
	u8 phy_to_drive_bay_num[256];
	__le16 phy_to_attached_dev_index[256];
	u8 box_index;
	u8 reserved;
	__le16 extra_physical_drive_flags;
	u8 negotiated_link_rate[256];
	u8 phy_to_phy_map[256];
	u8 redundant_path_present_map;
	u8 redundant_path_failure_map;
	u8 active_path_number;
	__le16 alternate_paths_phys_connector[8];
	u8 alternate_paths_phys_box_on_port[8];
	u8 multi_lun_device_lun_count;
	u8 minimum_good_fw_revision[8];
	u8 unique_inquiry_bytes[20];
	u8 current_temperature_degrees;
	u8 temperature_threshold_degrees;
	u8 max_temperature_degrees;
	u8 logical_blocks_per_phys_block_exp;
	__le16 current_queue_depth_limit;
	u8 switch_name[10];
	__le16 switch_port;
	u8 alternate_paths_switch_name[40];
	u8 alternate_paths_switch_port[8];
	__le16 power_on_hours;
	__le16 percent_endurance_used;
	u8 drive_authentication;
	u8 smart_carrier_authentication;
	u8 smart_carrier_app_fw_version;
	u8 smart_carrier_bootloader_fw_version;
	u8 sanitize_flags;
	u8 encryption_key_flags;
	u8 encryption_key_name[64];
	__le32 misc_drive_flags;
	__le16 dek_index;
	__le16 hba_drive_encryption_flags;
	__le16 max_overwrite_time;
	__le16 max_block_erase_time;
	__le16 max_crypto_erase_time;
	u8 connector_info[5];
	u8 connector_name[8][8];
	u8 page_83_identifier[16];
	u8 maximum_link_rate[256];
	u8 negotiated_physical_link_rate[256];
	u8 box_connector_name[8];
	u8 padding_to_multiple_of_512[9];
};
/* request buffer for BMIC_FLUSH_CACHE (packed firmware layout) */
struct bmic_flush_cache {
	u8 disable_flag;
	u8 system_power_action;
	u8 ndu_flush;
	u8 shutdown_event;	/* enum bmic_flush_cache_shutdown_event */
	u8 reserved[28];
};

/* for shutdown_event member of struct bmic_flush_cache */
enum bmic_flush_cache_shutdown_event {
	NONE_CACHE_FLUSH_ONLY = 0,
	SHUTDOWN = 1,
	HIBERNATE = 2,
	SUSPEND = 3,
	RESTART = 4
};

#pragma pack()
/* SAS transport helpers (implemented in the driver's SAS transport file) */

/* register the controller itself as a SAS host; returns 0 on success */
int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
/* attach a device to the SAS topology under the given node */
int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
	struct pqi_scsi_dev *device);
void pqi_remove_sas_device(struct pqi_scsi_dev *device);
/* look up a driver device from a SAS transport remote phy */
struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
	struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
extern struct sas_function_template pqi_sas_transport_functions;

#endif /* _SMARTPQI_H */