/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */

#ifndef Q_STRUCT_H
#define Q_STRUCT_H
/* Load transaction types for reading segment bytes specified by
 * NIC_SEND_GATHER_S[LD_TYPE].
 */
enum nic_send_ld_type_e {
	NIC_SEND_LD_TYPE_E_LDD = 0x0,
	NIC_SEND_LD_TYPE_E_LDT = 0x1,
	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,	/* sentinel: one past the last valid type */
};
/* Action taken by the parser when a configured EtherType is matched
 * (hardware-defined encoding).
 */
enum ether_type_algorithm {
	ETYPE_ALG_NONE = 0x0,
	ETYPE_ALG_SKIP = 0x1,
	ETYPE_ALG_ENDPARSE = 0x2,
	ETYPE_ALG_VLAN = 0x3,
	ETYPE_ALG_VLAN_STRIP = 0x4,
};
/* L3 protocol type reported by the receive parser (CQE l3_type field). */
enum layer3_type {
	L3TYPE_NONE = 0x00,
	L3TYPE_GRH = 0x01,
	L3TYPE_IPV4 = 0x04,
	L3TYPE_IPV4_OPTIONS = 0x05,
	L3TYPE_IPV6 = 0x06,
	L3TYPE_IPV6_OPTIONS = 0x07,
	L3TYPE_ET_STOP = 0x0D,
	L3TYPE_OTHER = 0x0E,
};
/* L4 protocol type reported by the receive parser (CQE l4_type field). */
enum layer4_type {
	L4TYPE_NONE = 0x00,
	L4TYPE_IPSEC_ESP = 0x01,
	L4TYPE_IPFRAG = 0x02,
	L4TYPE_IPCOMP = 0x03,
	L4TYPE_TCP = 0x04,
	L4TYPE_UDP = 0x05,
	L4TYPE_SCTP = 0x06,
	L4TYPE_GRE = 0x07,
	L4TYPE_ROCE_BTH = 0x08,
	L4TYPE_OTHER = 0x0E,
};
/* CPI and RSSI configuration */
enum cpi_algorithm_type {
	CPI_ALG_NONE = 0x0,
	CPI_ALG_VLAN = 0x1,
	CPI_ALG_VLAN16 = 0x2,
	CPI_ALG_DIFF = 0x3,
};
/* RSS hashing algorithm selected for a received packet. */
enum rss_algorithm_type {
	RSS_ALG_NONE = 0x00,
	RSS_ALG_PORT = 0x01,
	RSS_ALG_IP = 0x02,
	RSS_ALG_TCP_IP = 0x03,
	RSS_ALG_UDP_IP = 0x04,
	RSS_ALG_SCTP_IP = 0x05,
	RSS_ALG_GRE_IP = 0x06,
	RSS_ALG_ROCE = 0x07,
};
/* Bit positions used to enable individual RSS hash inputs
 * (each value is a bit index, not a mask).
 */
enum rss_hash_cfg {
	RSS_HASH_L2ETC = 0x00,
	RSS_HASH_IP = 0x01,
	RSS_HASH_TCP = 0x02,
	RSS_TCP_SYN_DIS = 0x03,
	RSS_HASH_UDP = 0x04,
	RSS_HASH_L4ETC = 0x05,
	RSS_HASH_ROCE = 0x06,
	RSS_L3_BIDI = 0x07,
	RSS_L4_BIDI = 0x08,
};
/* Completion queue entry types */
enum cqe_type {
	CQE_TYPE_INVALID = 0x0,
	CQE_TYPE_RX = 0x2,
	CQE_TYPE_RX_SPLIT = 0x3,
	CQE_TYPE_RX_TCP = 0x4,
	CQE_TYPE_SEND = 0x8,
	CQE_TYPE_SEND_PTP = 0x9,
};
/* Status reported in a CQE_TYPE_RX_TCP completion (cq_tcp_status field). */
enum cqe_rx_tcp_status {
	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
};
/* Status reported in a send completion (cqe_send_t.send_status).
 * 0x00 is success; all other values are hardware error codes.
 */
enum cqe_send_status {
	CQE_SEND_STATUS_GOOD = 0x00,
	CQE_SEND_STATUS_DESC_FAULT = 0x01,
	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
	CQE_SEND_STATUS_DATA_FAULT = 0x86,
	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
	CQE_SEND_STATUS_MEM_FAULT = 0x89,
	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
};
/* Reason a TCP reassembly context was terminated
 * (rbe_tcp_cnxt_t.tcp_end_reason).
 */
enum cqe_rx_tcp_end_reason {
	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
	CQE_RX_TCP_END_INVALID_FLAG = 1,
	CQE_RX_TCP_END_TIMEOUT = 2,
	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
	CQE_RX_TCP_END_PKT_ERR = 4,
	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
};
/* Packet protocol level error enumeration */
enum cqe_rx_err_level {
	CQE_RX_ERRLVL_RE = 0x0,
	CQE_RX_ERRLVL_L2 = 0x1,
	CQE_RX_ERRLVL_L3 = 0x2,
	CQE_RX_ERRLVL_L4 = 0x3,
};
/* Packet protocol level error type enumeration.
 * Value ranges correspond to cqe_rx_err_level: RE errors are < 0x20,
 * L2 errors are 0x20-0x2f, L3 errors 0x41-0x47, L4 errors 0x61-0x70.
 */
enum cqe_rx_err_opcode {
	CQE_RX_ERR_RE_NONE = 0x0,
	CQE_RX_ERR_RE_PARTIAL = 0x1,
	CQE_RX_ERR_RE_JABBER = 0x2,
	CQE_RX_ERR_RE_FCS = 0x7,
	CQE_RX_ERR_RE_TERMINATE = 0x9,
	CQE_RX_ERR_RE_RX_CTL = 0xb,
	CQE_RX_ERR_PREL2_ERR = 0x1f,
	CQE_RX_ERR_L2_FRAGMENT = 0x20,
	CQE_RX_ERR_L2_OVERRUN = 0x21,
	CQE_RX_ERR_L2_PFCS = 0x22,
	CQE_RX_ERR_L2_PUNY = 0x23,
	CQE_RX_ERR_L2_MAL = 0x24,
	CQE_RX_ERR_L2_OVERSIZE = 0x25,
	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
	CQE_RX_ERR_L2_LENMISM = 0x27,
	CQE_RX_ERR_L2_PCLP = 0x28,
	CQE_RX_ERR_IP_NOT = 0x41,
	CQE_RX_ERR_IP_CHK = 0x42,
	CQE_RX_ERR_IP_MAL = 0x43,
	CQE_RX_ERR_IP_MALD = 0x44,
	CQE_RX_ERR_IP_HOP = 0x45,
	CQE_RX_ERR_L3_ICRC = 0x46,
	CQE_RX_ERR_L3_PCLP = 0x47,
	CQE_RX_ERR_L4_MAL = 0x61,
	CQE_RX_ERR_L4_CHK = 0x62,
	CQE_RX_ERR_UDP_LEN = 0x63,
	CQE_RX_ERR_L4_PORT = 0x64,
	CQE_RX_ERR_TCP_FLAG = 0x65,
	CQE_RX_ERR_TCP_OFFSET = 0x66,
	CQE_RX_ERR_L4_PCLP = 0x67,
	CQE_RX_ERR_RBDR_TRUNC = 0x70,
};
/* Receive completion queue entry (CQE_TYPE_RX).
 *
 * Hardware-defined 64-bit word layout: the two #if branches list the same
 * fields in reverse bit order so the in-memory layout matches the device on
 * both big- and little-endian hosts.  NEVER reorder or resize these fields.
 * Words W0-W5 are bitfields; W6 onward are the 12 receive-buffer pointers.
 */
struct cqe_rx_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 stdn_fault:1;
	u64 rsvd0:1;
	u64 rq_qs:7;
	u64 rq_idx:3;
	u64 rsvd1:12;
	u64 rss_alg:4;
	u64 rsvd2:4;
	u64 rb_cnt:4;		/* number of valid rbN_sz/rbN_ptr entries */
	u64 vlan_found:1;
	u64 vlan_stripped:1;
	u64 vlan2_found:1;
	u64 vlan2_stripped:1;
	u64 l4_type:4;		/* enum layer4_type */
	u64 l3_type:4;		/* enum layer3_type */
	u64 l2_present:1;
	u64 err_level:3;	/* enum cqe_rx_err_level */
	u64 err_opcode:8;	/* enum cqe_rx_err_opcode */
	u64 pkt_len:16; /* W1 */
	u64 l2_ptr:8;		/* byte offsets of each header within the packet */
	u64 l3_ptr:8;
	u64 l4_ptr:8;
	u64 cq_pkt_len:8;
	u64 align_pad:3;
	u64 rsvd3:1;
	u64 chan:12;
	u64 rss_tag:32; /* W2 */
	u64 vlan_tci:16;
	u64 vlan_ptr:8;
	u64 vlan2_ptr:8;
	u64 rb3_sz:16; /* W3 */
	u64 rb2_sz:16;
	u64 rb1_sz:16;
	u64 rb0_sz:16;
	u64 rb7_sz:16; /* W4 */
	u64 rb6_sz:16;
	u64 rb5_sz:16;
	u64 rb4_sz:16;
	u64 rb11_sz:16; /* W5 */
	u64 rb10_sz:16;
	u64 rb9_sz:16;
	u64 rb8_sz:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 err_opcode:8;
	u64 err_level:3;
	u64 l2_present:1;
	u64 l3_type:4;
	u64 l4_type:4;
	u64 vlan2_stripped:1;
	u64 vlan2_found:1;
	u64 vlan_stripped:1;
	u64 vlan_found:1;
	u64 rb_cnt:4;
	u64 rsvd2:4;
	u64 rss_alg:4;
	u64 rsvd1:12;
	u64 rq_idx:3;
	u64 rq_qs:7;
	u64 rsvd0:1;
	u64 stdn_fault:1;
	u64 cqe_type:4; /* W0 */
	u64 chan:12;
	u64 rsvd3:1;
	u64 align_pad:3;
	u64 cq_pkt_len:8;
	u64 l4_ptr:8;
	u64 l3_ptr:8;
	u64 l2_ptr:8;
	u64 pkt_len:16; /* W1 */
	u64 vlan2_ptr:8;
	u64 vlan_ptr:8;
	u64 vlan_tci:16;
	u64 rss_tag:32; /* W2 */
	u64 rb0_sz:16;
	u64 rb1_sz:16;
	u64 rb2_sz:16;
	u64 rb3_sz:16; /* W3 */
	u64 rb4_sz:16;
	u64 rb5_sz:16;
	u64 rb6_sz:16;
	u64 rb7_sz:16; /* W4 */
	u64 rb8_sz:16;
	u64 rb9_sz:16;
	u64 rb10_sz:16;
	u64 rb11_sz:16; /* W5 */
#endif
	/* W6-W17: receive buffer addresses, one per rbN_sz above */
	u64 rb0_ptr:64;
	u64 rb1_ptr:64;
	u64 rb2_ptr:64;
	u64 rb3_ptr:64;
	u64 rb4_ptr:64;
	u64 rb5_ptr:64;
	u64 rb6_ptr:64;
	u64 rb7_ptr:64;
	u64 rb8_ptr:64;
	u64 rb9_ptr:64;
	u64 rb10_ptr:64;
	u64 rb11_ptr:64;
};
/* TCP reassembly error completion entry.
 * Endian-mirrored bitfields match the hardware layout; do not reorder.
 */
struct cqe_rx_tcp_err_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 rsvd0:60;
	u64 rsvd1:4; /* W1 */
	u64 partial_first:1;
	u64 rsvd2:27;
	u64 rbdr_bytes:8;
	u64 rsvd3:24;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 rsvd0:60;
	u64 cqe_type:4;
	u64 rsvd3:24;
	u64 rbdr_bytes:8;
	u64 rsvd2:27;
	u64 partial_first:1;
	u64 rsvd1:4;
#endif
};
/* TCP reassembly completion entry (CQE_TYPE_RX_TCP).
 * cq_tcp_status holds an enum cqe_rx_tcp_status value.
 * Endian-mirrored bitfields match the hardware layout; do not reorder.
 */
struct cqe_rx_tcp_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 rsvd0:52;
	u64 cq_tcp_status:8;
	u64 rsvd1:32; /* W1 */
	u64 tcp_cntx_bytes:8;
	u64 rsvd2:8;
	u64 tcp_err_bytes:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 cq_tcp_status:8;
	u64 rsvd0:52;
	u64 cqe_type:4; /* W0 */
	u64 tcp_err_bytes:16;
	u64 rsvd2:8;
	u64 tcp_cntx_bytes:8;
	u64 rsvd1:32; /* W1 */
#endif
};
/* Send completion entry (CQE_TYPE_SEND / CQE_TYPE_SEND_PTP).
 * send_status holds an enum cqe_send_status value; sqe_ptr identifies the
 * completed SQ descriptor.  Endian-mirrored bitfields; do not reorder.
 */
struct cqe_send_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 rsvd0:4;
	u64 sqe_ptr:16;
	u64 rsvd1:4;
	u64 rsvd2:10;
	u64 sq_qs:7;
	u64 sq_idx:3;
	u64 rsvd3:8;
	u64 send_status:8;
	u64 ptp_timestamp:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 send_status:8;
	u64 rsvd3:8;
	u64 sq_idx:3;
	u64 sq_qs:7;
	u64 rsvd2:10;
	u64 rsvd1:4;
	u64 sqe_ptr:16;
	u64 rsvd0:4;
	u64 cqe_type:4; /* W0 */
	u64 ptp_timestamp:64; /* W1 */
#endif
};
/* A raw completion queue descriptor: 64 x 64-bit words (512 bytes), viewed
 * through whichever header matches the cqe_type in its first word.
 */
union cq_desc_t {
	u64 u[64];
	struct cqe_send_t snd_hdr;
	struct cqe_rx_t rx_hdr;
	struct cqe_rx_tcp_t rx_tcp_hdr;
	struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
};
/* Receive buffer descriptor ring entry: a 128-byte-aligned buffer address
 * (low 7 bits are the cache_align field).  Endian-mirrored; do not reorder.
 */
struct rbdr_entry_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 rsvd0:15;
	u64 buf_addr:42;
	u64 cache_align:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 cache_align:7;
	u64 buf_addr:42;
	u64 rsvd0:15;
#endif
};
/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 tcp_pkt_cnt:12;
	u64 rsvd1:4;
	u64 align_hdr_bytes:4;
	u64 align_ptr_bytes:4;
	u64 ptr_bytes:16;
	u64 rsvd2:24;
	u64 cqe_type:4;
	u64 rsvd0:54;
	u64 tcp_end_reason:2;	/* enum cqe_rx_tcp_end_reason */
	u64 tcp_status:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tcp_status:4;
	u64 tcp_end_reason:2;
	u64 rsvd0:54;
	u64 cqe_type:4;
	u64 rsvd2:24;
	u64 ptr_bytes:16;
	u64 align_ptr_bytes:4;
	u64 align_hdr_bytes:4;
	u64 rsvd1:4;
	u64 tcp_pkt_cnt:12;
#endif
};
/* Prepended receive header.  Always big endian: no endian-mirrored variant,
 * so the fields are laid out exactly as the hardware writes them.
 */
struct rx_hdr_t {
	u64 opaque:32;
	u64 rss_flow:8;
	u64 skip_length:6;
	u64 disable_rss:1;
	u64 disable_tcp_reassembly:1;
	u64 nodrop:1;
	u64 dest_alg:2;
	u64 rsvd0:2;
	u64 dest_rq:11;
};
/* L4 checksum insertion mode for send headers (sq_hdr_subdesc.csum_l4). */
enum send_l4_csum_type {
	SEND_L4_CSUM_DISABLE = 0x00,
	SEND_L4_CSUM_UDP = 0x01,
	SEND_L4_CSUM_TCP = 0x02,
	SEND_L4_CSUM_SCTP = 0x03,
};
/* CRC algorithm selector for sq_crc_subdesc.crc_alg. */
enum send_crc_alg {
	SEND_CRCALG_CRC32 = 0x00,
	SEND_CRCALG_CRC32C = 0x01,
	SEND_CRCALG_ICRC = 0x02,
};
/* Load transaction type for gather reads (sq_gather_subdesc.ld_type);
 * mirrors enum nic_send_ld_type_e.
 */
enum send_load_type {
	SEND_LD_TYPE_LDD = 0x00,
	SEND_LD_TYPE_LDT = 0x01,
	SEND_LD_TYPE_LDWB = 0x02,
};
/* Memory-update operation performed by a SQ_DESC_TYPE_MEMORY subdescriptor
 * (sq_mem_subdesc.mem_alg).
 */
enum send_mem_alg_type {
	SEND_MEMALG_SET = 0x00,
	SEND_MEMALG_ADD = 0x08,
	SEND_MEMALG_SUB = 0x09,
	SEND_MEMALG_ADDLEN = 0x0A,
	SEND_MEMALG_SUBLEN = 0x0B,
};
/* Operand size for the memory subdescriptor (sq_mem_subdesc.mem_dsz). */
enum send_mem_dsz_type {
	SEND_MEMDSZ_B64 = 0x00,
	SEND_MEMDSZ_B32 = 0x01,
	SEND_MEMDSZ_B8 = 0x03,
};
/* Send queue subdescriptor types (subdesc_type field of each sq_*_subdesc). */
enum sq_subdesc_type {
	SQ_DESC_TYPE_INVALID = 0x00,
	SQ_DESC_TYPE_HEADER = 0x01,
	SQ_DESC_TYPE_CRC = 0x02,
	SQ_DESC_TYPE_IMMEDIATE = 0x03,
	SQ_DESC_TYPE_GATHER = 0x04,
	SQ_DESC_TYPE_MEMORY = 0x05,
};
/* SQ CRC subdescriptor (SQ_DESC_TYPE_CRC): requests CRC computation over
 * crc_len bytes starting at hdr_start, inserted at crc_insert_pos, seeded
 * with crc_ival.  Endian-mirrored bitfields; do not reorder.
 */
struct sq_crc_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 rsvd1:32;
	u64 crc_ival:32;
	u64 subdesc_type:4;
	u64 crc_alg:2;		/* enum send_crc_alg */
	u64 rsvd0:10;
	u64 crc_insert_pos:16;
	u64 hdr_start:16;
	u64 crc_len:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 crc_len:16;
	u64 hdr_start:16;
	u64 crc_insert_pos:16;
	u64 rsvd0:10;
	u64 crc_alg:2;
	u64 subdesc_type:4;
	u64 crc_ival:32;
	u64 rsvd1:32;
#endif
};
/* SQ gather subdescriptor (SQ_DESC_TYPE_GATHER): points at one segment of
 * packet data (size bytes at addr).  Endian-mirrored; do not reorder.
 */
struct sq_gather_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 ld_type:2;		/* enum send_load_type */
	u64 rsvd0:42;
	u64 size:16;
	u64 rsvd1:15; /* W1 */
	u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 size:16;
	u64 rsvd0:42;
	u64 ld_type:2;
	u64 subdesc_type:4; /* W0 */
	u64 addr:49;
	u64 rsvd1:15; /* W1 */
#endif
};
/* SQ immediate subdescriptor (SQ_DESC_TYPE_IMMEDIATE): carries len bytes of
 * inline packet data in the data word.  Endian-mirrored; do not reorder.
 */
struct sq_imm_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 rsvd0:46;
	u64 len:14;
	u64 data:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 len:14;
	u64 rsvd0:46;
	u64 subdesc_type:4; /* W0 */
	u64 data:64; /* W1 */
#endif
};
/* SQ memory subdescriptor (SQ_DESC_TYPE_MEMORY): updates a memory location
 * at addr when the send completes, per mem_alg/mem_dsz.
 * Endian-mirrored bitfields; do not reorder.
 */
struct sq_mem_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 mem_alg:4;		/* enum send_mem_alg_type */
	u64 mem_dsz:2;		/* enum send_mem_dsz_type */
	u64 wmem:1;
	u64 rsvd0:21;
	u64 offset:32;
	u64 rsvd1:15; /* W1 */
	u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 offset:32;
	u64 rsvd0:21;
	u64 wmem:1;
	u64 mem_dsz:2;
	u64 mem_alg:4;
	u64 subdesc_type:4; /* W0 */
	u64 addr:49;
	u64 rsvd1:15; /* W1 */
#endif
};
/* SQ header subdescriptor (SQ_DESC_TYPE_HEADER): first subdescriptor of a
 * send; describes the whole packet (total length, checksum offloads, TSO
 * parameters, number of following subdescriptors).
 * Endian-mirrored bitfields; do not reorder.
 */
struct sq_hdr_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4;
	u64 tso:1;
	u64 post_cqe:1; /* Post CQE on no error also */
	u64 dont_send:1;
	u64 tstmp:1;
	u64 subdesc_cnt:8;
	u64 csum_l4:2;		/* enum send_l4_csum_type */
	u64 csum_l3:1;
	u64 rsvd0:5;
	u64 l4_offset:8;
	u64 l3_offset:8;
	u64 rsvd1:4;
	u64 tot_len:20; /* W0 */
	u64 tso_sdc_cont:8;
	u64 tso_sdc_first:8;
	u64 tso_l4_offset:8;
	u64 tso_flags_last:12;
	u64 tso_flags_first:12;
	u64 rsvd2:2;
	u64 tso_max_paysize:14; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tot_len:20;
	u64 rsvd1:4;
	u64 l3_offset:8;
	u64 l4_offset:8;
	u64 rsvd0:5;
	u64 csum_l3:1;
	u64 csum_l4:2;
	u64 subdesc_cnt:8;
	u64 tstmp:1;
	u64 dont_send:1;
	u64 post_cqe:1; /* Post CQE on no error also */
	u64 tso:1;
	u64 subdesc_type:4; /* W0 */
	u64 tso_max_paysize:14;
	u64 rsvd2:2;
	u64 tso_flags_first:12;
	u64 tso_flags_last:12;
	u64 tso_l4_offset:8;
	u64 tso_sdc_first:8;
	u64 tso_sdc_cont:8; /* W1 */
#endif
};
/* Queue config register formats */

/* Receive queue configuration register. */
struct rq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_2_63:62;
	u64 ena:1;
	u64 tcp_ena:1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tcp_ena:1;
	u64 ena:1;
	u64 reserved_2_63:62;
#endif
};
/* Completion queue configuration register.  Reserved field names encode the
 * bit ranges they occupy (reserved_LOW_HIGH).
 */
struct cq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_43_63:21;
	u64 ena:1;
	u64 reset:1;
	u64 caching:1;
	u64 reserved_35_39:5;
	u64 qsize:3;
	u64 reserved_25_31:7;
	u64 avg_con:9;
	u64 reserved_0_15:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 reserved_0_15:16;
	u64 avg_con:9;
	u64 reserved_25_31:7;
	u64 qsize:3;
	u64 reserved_35_39:5;
	u64 caching:1;
	u64 reset:1;
	u64 ena:1;
	u64 reserved_43_63:21;
#endif
};
/* Send queue configuration register. */
struct sq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_20_63:44;
	u64 ena:1;
	u64 reserved_18_18:1;
	u64 reset:1;
	u64 ldwb:1;
	u64 reserved_11_15:5;
	u64 qsize:3;
	u64 reserved_3_7:5;
	u64 tstmp_bgx_intf:3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tstmp_bgx_intf:3;
	u64 reserved_3_7:5;
	u64 qsize:3;
	u64 reserved_11_15:5;
	u64 ldwb:1;
	u64 reset:1;
	u64 reserved_18_18:1;
	u64 ena:1;
	u64 reserved_20_63:44;
#endif
};
/* Receive buffer descriptor ring configuration register. */
struct rbdr_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_45_63:19;
	u64 ena:1;
	u64 reset:1;
	u64 ldwb:1;
	u64 reserved_36_41:6;
	u64 qsize:4;
	u64 reserved_25_31:7;
	u64 avg_con:9;
	u64 reserved_12_15:4;
	u64 lines:12;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 lines:12;
	u64 reserved_12_15:4;
	u64 avg_con:9;
	u64 reserved_25_31:7;
	u64 qsize:4;
	u64 reserved_36_41:6;
	u64 ldwb:1;
	u64 reset:1;
	u64 ena:1;
	u64 reserved_45_63:19;
#endif
};
/* Queue set configuration register. */
struct qs_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_32_63:32;
	u64 ena:1;
	u64 reserved_27_30:4;
	u64 sq_ins_ena:1;
	u64 sq_ins_pos:6;
	u64 lock_ena:1;
	u64 lock_viol_cqe_ena:1;
	u64 send_tstmp_ena:1;
	u64 be:1;
	u64 reserved_7_15:9;
	u64 vnic:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 vnic:7;
	u64 reserved_7_15:9;
	u64 be:1;
	u64 send_tstmp_ena:1;
	u64 lock_viol_cqe_ena:1;
	u64 lock_ena:1;
	u64 sq_ins_pos:6;
	u64 sq_ins_ena:1;
	u64 reserved_27_30:4;
	u64 ena:1;
	u64 reserved_32_63:32;
#endif
};
#endif /* Q_STRUCT_H */