nitrox_csr.h 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __NITROX_CSR_H
  3. #define __NITROX_CSR_H
  4. #include <asm/byteorder.h>
  5. #include <linux/types.h>
  6. /* EMU clusters */
  7. #define NR_CLUSTERS 4
  8. #define AE_CORES_PER_CLUSTER 20
  9. #define SE_CORES_PER_CLUSTER 16
  10. /* BIST registers */
  11. #define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000))
  12. #define UCD_BIST_STATUS 0x12C0070
  13. #define NPS_CORE_BIST_REG 0x10000E8
  14. #define NPS_CORE_NPC_BIST_REG 0x1000128
  15. #define NPS_PKT_SLC_BIST_REG 0x1040088
  16. #define NPS_PKT_IN_BIST_REG 0x1040100
  17. #define POM_BIST_REG 0x11C0100
  18. #define BMI_BIST_REG 0x1140080
  19. #define EFL_CORE_BIST_REGX(_i) (0x1240100 + ((_i) * 0x400))
  20. #define EFL_TOP_BIST_STAT 0x1241090
  21. #define BMO_BIST_REG 0x1180080
  22. #define LBC_BIST_STATUS 0x1200020
  23. #define PEM_BIST_STATUSX(_i) (0x1080468 | ((_i) << 18))
  24. /* EMU registers */
  25. #define EMU_SE_ENABLEX(_i) (0x1400000 + ((_i) * 0x40000))
  26. #define EMU_AE_ENABLEX(_i) (0x1400008 + ((_i) * 0x40000))
  27. #define EMU_WD_INT_ENA_W1SX(_i) (0x1402318 + ((_i) * 0x40000))
  28. #define EMU_GE_INT_ENA_W1SX(_i) (0x1402518 + ((_i) * 0x40000))
  29. #define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000))
  30. /* UCD registers */
  31. #define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010
  32. #define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20))
  33. #define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000))
  34. /* NPS core registers */
  35. #define NPS_CORE_GBL_VFCFG 0x1000000
  36. #define NPS_CORE_CONTROL 0x1000008
  37. #define NPS_CORE_INT_ACTIVE 0x1000080
  38. #define NPS_CORE_INT 0x10000A0
  39. #define NPS_CORE_INT_ENA_W1S 0x10000B8
  40. #define NPS_STATS_PKT_DMA_RD_CNT 0x1000180
  41. #define NPS_STATS_PKT_DMA_WR_CNT 0x1000190
  42. /* NPS packet registers */
  43. #define NPS_PKT_INT 0x1040018
  44. #define NPS_PKT_IN_RERR_HI 0x1040108
  45. #define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120
  46. #define NPS_PKT_IN_RERR_LO 0x1040128
  47. #define NPS_PKT_IN_RERR_LO_ENA_W1S 0x1040140
  48. #define NPS_PKT_IN_ERR_TYPE 0x1040148
  49. #define NPS_PKT_IN_ERR_TYPE_ENA_W1S 0x1040160
  50. #define NPS_PKT_IN_INSTR_CTLX(_i) (0x10060 + ((_i) * 0x40000))
  51. #define NPS_PKT_IN_INSTR_BADDRX(_i) (0x10068 + ((_i) * 0x40000))
  52. #define NPS_PKT_IN_INSTR_RSIZEX(_i) (0x10070 + ((_i) * 0x40000))
  53. #define NPS_PKT_IN_DONE_CNTSX(_i) (0x10080 + ((_i) * 0x40000))
  54. #define NPS_PKT_IN_INSTR_BAOFF_DBELLX(_i) (0x10078 + ((_i) * 0x40000))
  55. #define NPS_PKT_IN_INT_LEVELSX(_i) (0x10088 + ((_i) * 0x40000))
  56. #define NPS_PKT_SLC_RERR_HI 0x1040208
  57. #define NPS_PKT_SLC_RERR_HI_ENA_W1S 0x1040220
  58. #define NPS_PKT_SLC_RERR_LO 0x1040228
  59. #define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240
  60. #define NPS_PKT_SLC_ERR_TYPE 0x1040248
  61. #define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260
  62. #define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000))
  63. #define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000))
  64. #define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000))
  65. /* POM registers */
  66. #define POM_INT_ENA_W1S 0x11C0018
  67. #define POM_GRP_EXECMASKX(_i) (0x11C1100 | ((_i) * 8))
  68. #define POM_INT 0x11C0000
  69. #define POM_PERF_CTL 0x11CC400
  70. /* BMI registers */
  71. #define BMI_INT 0x1140000
  72. #define BMI_CTL 0x1140020
  73. #define BMI_INT_ENA_W1S 0x1140018
  74. #define BMI_NPS_PKT_CNT 0x1140070
  75. /* EFL registers */
  76. #define EFL_CORE_INT_ENA_W1SX(_i) (0x1240018 + ((_i) * 0x400))
  77. #define EFL_CORE_VF_ERR_INT0X(_i) (0x1240050 + ((_i) * 0x400))
  78. #define EFL_CORE_VF_ERR_INT0_ENA_W1SX(_i) (0x1240068 + ((_i) * 0x400))
  79. #define EFL_CORE_VF_ERR_INT1X(_i) (0x1240070 + ((_i) * 0x400))
  80. #define EFL_CORE_VF_ERR_INT1_ENA_W1SX(_i) (0x1240088 + ((_i) * 0x400))
  81. #define EFL_CORE_SE_ERR_INTX(_i) (0x12400A0 + ((_i) * 0x400))
  82. #define EFL_RNM_CTL_STATUS 0x1241800
  83. #define EFL_CORE_INTX(_i) (0x1240000 + ((_i) * 0x400))
  84. /* BMO registers */
  85. #define BMO_CTL2 0x1180028
  86. #define BMO_NPS_SLC_PKT_CNT 0x1180078
  87. /* LBC registers */
  88. #define LBC_INT 0x1200000
  89. #define LBC_INVAL_CTL 0x1201010
  90. #define LBC_PLM_VF1_64_INT 0x1202008
  91. #define LBC_INVAL_STATUS 0x1202010
  92. #define LBC_INT_ENA_W1S 0x1203000
  93. #define LBC_PLM_VF1_64_INT_ENA_W1S 0x1205008
  94. #define LBC_PLM_VF65_128_INT 0x1206008
  95. #define LBC_ELM_VF1_64_INT 0x1208000
  96. #define LBC_PLM_VF65_128_INT_ENA_W1S 0x1209008
  97. #define LBC_ELM_VF1_64_INT_ENA_W1S 0x120B000
  98. #define LBC_ELM_VF65_128_INT 0x120C000
  99. #define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000
  100. /* PEM registers */
  101. #define PEM0_INT 0x1080428
  102. /**
  103. * struct emu_fuse_map - EMU Fuse Map Registers
  104. * @ae_fuse: Fuse settings for AE 19..0
  105. * @se_fuse: Fuse settings for SE 15..0
  106. *
  107. * A set bit indicates the unit is fuse disabled.
  108. */
  109. union emu_fuse_map {
  110. u64 value;
  111. struct {
  112. #if (defined(__BIG_ENDIAN_BITFIELD))
  113. u64 valid : 1;
  114. u64 raz_52_62 : 11;
  115. u64 ae_fuse : 20;
  116. u64 raz_16_31 : 16;
  117. u64 se_fuse : 16;
  118. #else
  119. u64 se_fuse : 16;
  120. u64 raz_16_31 : 16;
  121. u64 ae_fuse : 20;
  122. u64 raz_52_62 : 11;
  123. u64 valid : 1;
  124. #endif
  125. } s;
  126. };
  127. /**
  128. * struct emu_se_enable - Symmetric Engine Enable Registers
  129. * @enable: Individual enables for each of the clusters
  130. * 16 symmetric engines.
  131. */
  132. union emu_se_enable {
  133. u64 value;
  134. struct {
  135. #if (defined(__BIG_ENDIAN_BITFIELD))
  136. u64 raz : 48;
  137. u64 enable : 16;
  138. #else
  139. u64 enable : 16;
  140. u64 raz : 48;
  141. #endif
  142. } s;
  143. };
  144. /**
  145. * struct emu_ae_enable - EMU Asymmetric engines.
  146. * @enable: Individual enables for each of the cluster's
  147. * 20 Asymmetric Engines.
  148. */
  149. union emu_ae_enable {
  150. u64 value;
  151. struct {
  152. #if (defined(__BIG_ENDIAN_BITFIELD))
  153. u64 raz : 44;
  154. u64 enable : 20;
  155. #else
  156. u64 enable : 20;
  157. u64 raz : 44;
  158. #endif
  159. } s;
  160. };
  161. /**
  162. * struct emu_wd_int_ena_w1s - EMU Interrupt Enable Registers
  163. * @ae_wd: Reads or sets enable for EMU(0..3)_WD_INT[AE_WD]
  164. * @se_wd: Reads or sets enable for EMU(0..3)_WD_INT[SE_WD]
  165. */
  166. union emu_wd_int_ena_w1s {
  167. u64 value;
  168. struct {
  169. #if (defined(__BIG_ENDIAN_BITFIELD))
  170. u64 raz2 : 12;
  171. u64 ae_wd : 20;
  172. u64 raz1 : 16;
  173. u64 se_wd : 16;
  174. #else
  175. u64 se_wd : 16;
  176. u64 raz1 : 16;
  177. u64 ae_wd : 20;
  178. u64 raz2 : 12;
  179. #endif
  180. } s;
  181. };
  182. /**
  183. * struct emu_ge_int_ena_w1s - EMU Interrupt Enable set registers
  184. * @ae_ge: Reads or sets enable for EMU(0..3)_GE_INT[AE_GE]
  185. * @se_ge: Reads or sets enable for EMU(0..3)_GE_INT[SE_GE]
  186. */
  187. union emu_ge_int_ena_w1s {
  188. u64 value;
  189. struct {
  190. #if (defined(__BIG_ENDIAN_BITFIELD))
  191. u64 raz_52_63 : 12;
  192. u64 ae_ge : 20;
  193. u64 raz_16_31: 16;
  194. u64 se_ge : 16;
  195. #else
  196. u64 se_ge : 16;
  197. u64 raz_16_31: 16;
  198. u64 ae_ge : 20;
  199. u64 raz_52_63 : 12;
  200. #endif
  201. } s;
  202. };
  203. /**
  204. * struct nps_pkt_slc_ctl - Solicited Packet Out Control Registers
  205. * @rh: Indicates whether to remove or include the response header
  206. * 1 = Include, 0 = Remove
  207. * @z: If set, 8 trailing 0x00 bytes will be added to the end of the
  208. * outgoing packet.
  209. * @enb: Enable for this port.
  210. */
  211. union nps_pkt_slc_ctl {
  212. u64 value;
  213. struct {
  214. #if defined(__BIG_ENDIAN_BITFIELD)
  215. u64 raz : 61;
  216. u64 rh : 1;
  217. u64 z : 1;
  218. u64 enb : 1;
  219. #else
  220. u64 enb : 1;
  221. u64 z : 1;
  222. u64 rh : 1;
  223. u64 raz : 61;
  224. #endif
  225. } s;
  226. };
  227. /**
  228. * struct nps_pkt_slc_cnts - Solicited Packet Out Count Registers
  229. * @slc_int: Returns a 1 when:
  230. * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
  231. * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET].
  232. * To clear the bit, the CNTS register must be written to clear.
  233. * @in_int: Returns a 1 when:
  234. * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT].
  235. * To clear the bit, the DONE_CNTS register must be written to clear.
  236. * @mbox_int: Returns a 1 when:
  237. * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. To clear the bit,
  238. * write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] with 1.
  239. * @timer: Timer, incremented every 2048 coprocessor clock cycles
  240. * when [CNT] is not zero. The hardware clears both [TIMER] and
  241. * [INT] when [CNT] goes to 0.
  242. * @cnt: Packet counter. Hardware adds to [CNT] as it sends packets out.
  243. * On a write to this CSR, hardware subtracts the amount written to the
  244. * [CNT] field from [CNT].
  245. */
  246. union nps_pkt_slc_cnts {
  247. u64 value;
  248. struct {
  249. #if defined(__BIG_ENDIAN_BITFIELD)
  250. u64 slc_int : 1;
  251. u64 uns_int : 1;
  252. u64 in_int : 1;
  253. u64 mbox_int : 1;
  254. u64 resend : 1;
  255. u64 raz : 5;
  256. u64 timer : 22;
  257. u64 cnt : 32;
  258. #else
  259. u64 cnt : 32;
  260. u64 timer : 22;
  261. u64 raz : 5;
  262. u64 resend : 1;
  263. u64 mbox_int : 1;
  264. u64 in_int : 1;
  265. u64 uns_int : 1;
  266. u64 slc_int : 1;
  267. #endif
  268. } s;
  269. };
  270. /**
  271. * struct nps_pkt_slc_int_levels - Solicited Packet Out Interrupt Levels
  272. * Registers.
  273. * @bmode: Determines whether NPS_PKT_SLC_CNTS[CNT] is a byte or
  274. * packet counter.
  275. * @timet: Output port counter time interrupt threshold.
  276. * @cnt: Output port counter interrupt threshold.
  277. */
  278. union nps_pkt_slc_int_levels {
  279. u64 value;
  280. struct {
  281. #if defined(__BIG_ENDIAN_BITFIELD)
  282. u64 bmode : 1;
  283. u64 raz : 9;
  284. u64 timet : 22;
  285. u64 cnt : 32;
  286. #else
  287. u64 cnt : 32;
  288. u64 timet : 22;
  289. u64 raz : 9;
  290. u64 bmode : 1;
  291. #endif
  292. } s;
  293. };
  294. /**
  295. * struct nps_pkt_inst - NPS Packet Interrupt Register
  296. * @in_err: Set when any NPS_PKT_IN_RERR_HI/LO bit and
  297. * corresponding NPS_PKT_IN_RERR_*_ENA_* bit are bot set.
  298. * @uns_err: Set when any NSP_PKT_UNS_RERR_HI/LO bit and
  299. * corresponding NPS_PKT_UNS_RERR_*_ENA_* bit are both set.
  300. * @slc_er: Set when any NSP_PKT_SLC_RERR_HI/LO bit and
  301. * corresponding NPS_PKT_SLC_RERR_*_ENA_* bit are both set.
  302. */
  303. union nps_pkt_int {
  304. u64 value;
  305. struct {
  306. #if defined(__BIG_ENDIAN_BITFIELD)
  307. u64 raz : 54;
  308. u64 uns_wto : 1;
  309. u64 in_err : 1;
  310. u64 uns_err : 1;
  311. u64 slc_err : 1;
  312. u64 in_dbe : 1;
  313. u64 in_sbe : 1;
  314. u64 uns_dbe : 1;
  315. u64 uns_sbe : 1;
  316. u64 slc_dbe : 1;
  317. u64 slc_sbe : 1;
  318. #else
  319. u64 slc_sbe : 1;
  320. u64 slc_dbe : 1;
  321. u64 uns_sbe : 1;
  322. u64 uns_dbe : 1;
  323. u64 in_sbe : 1;
  324. u64 in_dbe : 1;
  325. u64 slc_err : 1;
  326. u64 uns_err : 1;
  327. u64 in_err : 1;
  328. u64 uns_wto : 1;
  329. u64 raz : 54;
  330. #endif
  331. } s;
  332. };
  333. /**
  334. * struct nps_pkt_in_done_cnts - Input instruction ring counts registers
  335. * @slc_cnt: Returns a 1 when:
  336. * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
  337. * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SCL(i)_INT_LEVELS[TIMET]
  338. * To clear the bit, the CNTS register must be
  339. * written to clear the underlying condition
  340. * @uns_int: Return a 1 when:
  341. * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT], or
  342. * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET]
  343. * To clear the bit, the CNTS register must be
  344. * written to clear the underlying condition
  345. * @in_int: Returns a 1 when:
  346. * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]
  347. * To clear the bit, the DONE_CNTS register
  348. * must be written to clear the underlying condition
  349. * @mbox_int: Returns a 1 when:
  350. * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set.
  351. * To clear the bit, write NPS_PKT_MBOX_PF_VF(i)_INT[INTR]
  352. * with 1.
  353. * @resend: A write of 1 will resend an MSI-X interrupt message if any
  354. * of the following conditions are true for this ring "i".
  355. * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT]
  356. * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET]
  357. * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT]
  358. * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET]
  359. * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]
  360. * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set
  361. * @cnt: Packet counter. Hardware adds to [CNT] as it reads
  362. * packets. On a write to this CSR, hardware substracts the
  363. * amount written to the [CNT] field from [CNT], which will
  364. * clear PKT_IN(i)_INT_STATUS[INTR] if [CNT] becomes <=
  365. * NPS_PKT_IN(i)_INT_LEVELS[CNT]. This register should be
  366. * cleared before enabling a ring by reading the current
  367. * value and writing it back.
  368. */
  369. union nps_pkt_in_done_cnts {
  370. u64 value;
  371. struct {
  372. #if defined(__BIG_ENDIAN_BITFIELD)
  373. u64 slc_int : 1;
  374. u64 uns_int : 1;
  375. u64 in_int : 1;
  376. u64 mbox_int : 1;
  377. u64 resend : 1;
  378. u64 raz : 27;
  379. u64 cnt : 32;
  380. #else
  381. u64 cnt : 32;
  382. u64 raz : 27;
  383. u64 resend : 1;
  384. u64 mbox_int : 1;
  385. u64 in_int : 1;
  386. u64 uns_int : 1;
  387. u64 slc_int : 1;
  388. #endif
  389. } s;
  390. };
  391. /**
  392. * struct nps_pkt_in_instr_ctl - Input Instruction Ring Control Registers.
  393. * @is64b: If 1, the ring uses 64-byte instructions. If 0, the
  394. * ring uses 32-byte instructions.
  395. * @enb: Enable for the input ring.
  396. */
  397. union nps_pkt_in_instr_ctl {
  398. u64 value;
  399. struct {
  400. #if (defined(__BIG_ENDIAN_BITFIELD))
  401. u64 raz : 62;
  402. u64 is64b : 1;
  403. u64 enb : 1;
  404. #else
  405. u64 enb : 1;
  406. u64 is64b : 1;
  407. u64 raz : 62;
  408. #endif
  409. } s;
  410. };
  411. /**
  412. * struct nps_pkt_in_instr_rsize - Input instruction ring size registers
  413. * @rsize: Ring size (number of instructions)
  414. */
  415. union nps_pkt_in_instr_rsize {
  416. u64 value;
  417. struct {
  418. #if (defined(__BIG_ENDIAN_BITFIELD))
  419. u64 raz : 32;
  420. u64 rsize : 32;
  421. #else
  422. u64 rsize : 32;
  423. u64 raz : 32;
  424. #endif
  425. } s;
  426. };
  427. /**
  428. * struct nps_pkt_in_instr_baoff_dbell - Input instruction ring
  429. * base address offset and doorbell registers
  430. * @aoff: Address offset. The offset from the NPS_PKT_IN_INSTR_BADDR
  431. * where the next pointer is read.
  432. * @dbell: Pointer list doorbell count. Write operations to this field
  433. * increments the present value here. Read operations return the
  434. * present value.
  435. */
  436. union nps_pkt_in_instr_baoff_dbell {
  437. u64 value;
  438. struct {
  439. #if (defined(__BIG_ENDIAN_BITFIELD))
  440. u64 aoff : 32;
  441. u64 dbell : 32;
  442. #else
  443. u64 dbell : 32;
  444. u64 aoff : 32;
  445. #endif
  446. } s;
  447. };
  448. /**
  449. * struct nps_core_int_ena_w1s - NPS core interrupt enable set register
  450. * @host_nps_wr_err: Reads or sets enable for
  451. * NPS_CORE_INT[HOST_NPS_WR_ERR].
  452. * @npco_dma_malform: Reads or sets enable for
  453. * NPS_CORE_INT[NPCO_DMA_MALFORM].
  454. * @exec_wr_timeout: Reads or sets enable for
  455. * NPS_CORE_INT[EXEC_WR_TIMEOUT].
  456. * @host_wr_timeout: Reads or sets enable for
  457. * NPS_CORE_INT[HOST_WR_TIMEOUT].
  458. * @host_wr_err: Reads or sets enable for
  459. * NPS_CORE_INT[HOST_WR_ERR]
  460. */
  461. union nps_core_int_ena_w1s {
  462. u64 value;
  463. struct {
  464. #if (defined(__BIG_ENDIAN_BITFIELD))
  465. u64 raz4 : 55;
  466. u64 host_nps_wr_err : 1;
  467. u64 npco_dma_malform : 1;
  468. u64 exec_wr_timeout : 1;
  469. u64 host_wr_timeout : 1;
  470. u64 host_wr_err : 1;
  471. u64 raz3 : 1;
  472. u64 raz2 : 1;
  473. u64 raz1 : 1;
  474. u64 raz0 : 1;
  475. #else
  476. u64 raz0 : 1;
  477. u64 raz1 : 1;
  478. u64 raz2 : 1;
  479. u64 raz3 : 1;
  480. u64 host_wr_err : 1;
  481. u64 host_wr_timeout : 1;
  482. u64 exec_wr_timeout : 1;
  483. u64 npco_dma_malform : 1;
  484. u64 host_nps_wr_err : 1;
  485. u64 raz4 : 55;
  486. #endif
  487. } s;
  488. };
  489. /**
  490. * struct nps_core_gbl_vfcfg - Global VF Configuration Register.
  491. * @ilk_disable: When set, this bit indicates that the ILK interface has
  492. * been disabled.
  493. * @obaf: BMO allocation control
  494. * 0 = allocate per queue
  495. * 1 = allocate per VF
  496. * @ibaf: BMI allocation control
  497. * 0 = allocate per queue
  498. * 1 = allocate per VF
  499. * @zaf: ZIP allocation control
  500. * 0 = allocate per queue
  501. * 1 = allocate per VF
  502. * @aeaf: AE allocation control
  503. * 0 = allocate per queue
  504. * 1 = allocate per VF
  505. * @seaf: SE allocation control
  506. * 0 = allocation per queue
  507. * 1 = allocate per VF
  508. * @cfg: VF/PF mode.
  509. */
  510. union nps_core_gbl_vfcfg {
  511. u64 value;
  512. struct {
  513. #if (defined(__BIG_ENDIAN_BITFIELD))
  514. u64 raz :55;
  515. u64 ilk_disable :1;
  516. u64 obaf :1;
  517. u64 ibaf :1;
  518. u64 zaf :1;
  519. u64 aeaf :1;
  520. u64 seaf :1;
  521. u64 cfg :3;
  522. #else
  523. u64 cfg :3;
  524. u64 seaf :1;
  525. u64 aeaf :1;
  526. u64 zaf :1;
  527. u64 ibaf :1;
  528. u64 obaf :1;
  529. u64 ilk_disable :1;
  530. u64 raz :55;
  531. #endif
  532. } s;
  533. };
  534. /**
  535. * struct nps_core_int_active - NPS Core Interrupt Active Register
  536. * @resend: Resend MSI-X interrupt if needs to handle interrupts
  537. * Sofware can set this bit and then exit the ISR.
  538. * @ocla: Set when any OCLA(0)_INT and corresponding OCLA(0_INT_ENA_W1C
  539. * bit are set
  540. * @mbox: Set when any NPS_PKT_MBOX_INT_LO/HI and corresponding
  541. * NPS_PKT_MBOX_INT_LO_ENA_W1C/HI_ENA_W1C bits are set
  542. * @emu: bit i is set in [EMU] when any EMU(i)_INT bit is set
  543. * @bmo: Set when any BMO_INT bit is set
  544. * @bmi: Set when any BMI_INT bit is set or when any non-RO
  545. * BMI_INT and corresponding BMI_INT_ENA_W1C bits are both set
  546. * @aqm: Set when any AQM_INT bit is set
  547. * @zqm: Set when any ZQM_INT bit is set
  548. * @efl: Set when any EFL_INT RO bit is set or when any non-RO EFL_INT
  549. * and corresponding EFL_INT_ENA_W1C bits are both set
  550. * @ilk: Set when any ILK_INT bit is set
  551. * @lbc: Set when any LBC_INT RO bit is set or when any non-RO LBC_INT
  552. * and corresponding LBC_INT_ENA_W1C bits are bot set
  553. * @pem: Set when any PEM(0)_INT RO bit is set or when any non-RO
  554. * PEM(0)_INT and corresponding PEM(0)_INT_ENA_W1C bit are both set
  555. * @ucd: Set when any UCD_INT bit is set
  556. * @zctl: Set when any ZIP_INT RO bit is set or when any non-RO ZIP_INT
  557. * and corresponding ZIP_INT_ENA_W1C bits are both set
  558. * @lbm: Set when any LBM_INT bit is set
  559. * @nps_pkt: Set when any NPS_PKT_INT bit is set
  560. * @nps_core: Set when any NPS_CORE_INT RO bit is set or when non-RO
  561. * NPS_CORE_INT and corresponding NSP_CORE_INT_ENA_W1C bits are both set
  562. */
  563. union nps_core_int_active {
  564. u64 value;
  565. struct {
  566. #if (defined(__BIG_ENDIAN_BITFIELD))
  567. u64 resend : 1;
  568. u64 raz : 43;
  569. u64 ocla : 1;
  570. u64 mbox : 1;
  571. u64 emu : 4;
  572. u64 bmo : 1;
  573. u64 bmi : 1;
  574. u64 aqm : 1;
  575. u64 zqm : 1;
  576. u64 efl : 1;
  577. u64 ilk : 1;
  578. u64 lbc : 1;
  579. u64 pem : 1;
  580. u64 pom : 1;
  581. u64 ucd : 1;
  582. u64 zctl : 1;
  583. u64 lbm : 1;
  584. u64 nps_pkt : 1;
  585. u64 nps_core : 1;
  586. #else
  587. u64 nps_core : 1;
  588. u64 nps_pkt : 1;
  589. u64 lbm : 1;
  590. u64 zctl: 1;
  591. u64 ucd : 1;
  592. u64 pom : 1;
  593. u64 pem : 1;
  594. u64 lbc : 1;
  595. u64 ilk : 1;
  596. u64 efl : 1;
  597. u64 zqm : 1;
  598. u64 aqm : 1;
  599. u64 bmi : 1;
  600. u64 bmo : 1;
  601. u64 emu : 4;
  602. u64 mbox : 1;
  603. u64 ocla : 1;
  604. u64 raz : 43;
  605. u64 resend : 1;
  606. #endif
  607. } s;
  608. };
  609. /**
  610. * struct efl_core_int - EFL Interrupt Registers
  611. * @epci_decode_err: EPCI decoded a transacation that was unknown
  612. * This error should only occurred when there is a micrcode/SE error
  613. * and should be considered fatal
  614. * @ae_err: An AE uncorrectable error occurred.
  615. * See EFL_CORE(0..3)_AE_ERR_INT
  616. * @se_err: An SE uncorrectable error occurred.
  617. * See EFL_CORE(0..3)_SE_ERR_INT
  618. * @dbe: Double-bit error occurred in EFL
  619. * @sbe: Single-bit error occurred in EFL
  620. * @d_left: Asserted when new POM-Header-BMI-data is
  621. * being sent to an Exec, and that Exec has Not read all BMI
  622. * data associated with the previous POM header
  623. * @len_ovr: Asserted when an Exec-Read is issued that is more than
  624. * 14 greater in length that the BMI data left to be read
  625. */
  626. union efl_core_int {
  627. u64 value;
  628. struct {
  629. #if (defined(__BIG_ENDIAN_BITFIELD))
  630. u64 raz : 57;
  631. u64 epci_decode_err : 1;
  632. u64 ae_err : 1;
  633. u64 se_err : 1;
  634. u64 dbe : 1;
  635. u64 sbe : 1;
  636. u64 d_left : 1;
  637. u64 len_ovr : 1;
  638. #else
  639. u64 len_ovr : 1;
  640. u64 d_left : 1;
  641. u64 sbe : 1;
  642. u64 dbe : 1;
  643. u64 se_err : 1;
  644. u64 ae_err : 1;
  645. u64 epci_decode_err : 1;
  646. u64 raz : 57;
  647. #endif
  648. } s;
  649. };
  650. /**
  651. * struct efl_core_int_ena_w1s - EFL core interrupt enable set register
  652. * @epci_decode_err: Reads or sets enable for
  653. * EFL_CORE(0..3)_INT[EPCI_DECODE_ERR].
  654. * @d_left: Reads or sets enable for
  655. * EFL_CORE(0..3)_INT[D_LEFT].
  656. * @len_ovr: Reads or sets enable for
  657. * EFL_CORE(0..3)_INT[LEN_OVR].
  658. */
  659. union efl_core_int_ena_w1s {
  660. u64 value;
  661. struct {
  662. #if (defined(__BIG_ENDIAN_BITFIELD))
  663. u64 raz_7_63 : 57;
  664. u64 epci_decode_err : 1;
  665. u64 raz_2_5 : 4;
  666. u64 d_left : 1;
  667. u64 len_ovr : 1;
  668. #else
  669. u64 len_ovr : 1;
  670. u64 d_left : 1;
  671. u64 raz_2_5 : 4;
  672. u64 epci_decode_err : 1;
  673. u64 raz_7_63 : 57;
  674. #endif
  675. } s;
  676. };
  677. /**
  678. * struct efl_rnm_ctl_status - RNM Control and Status Register
  679. * @ent_sel: Select input to RNM FIFO
  680. * @exp_ent: Exported entropy enable for random number generator
  681. * @rng_rst: Reset to RNG. Setting this bit to 1 cancels the generation
  682. * of the current random number.
  683. * @rnm_rst: Reset the RNM. Setting this bit to 1 clears all sorted numbers
  684. * in the random number memory.
  685. * @rng_en: Enabled the output of the RNG.
  686. * @ent_en: Entropy enable for random number generator.
  687. */
  688. union efl_rnm_ctl_status {
  689. u64 value;
  690. struct {
  691. #if (defined(__BIG_ENDIAN_BITFIELD))
  692. u64 raz_9_63 : 55;
  693. u64 ent_sel : 4;
  694. u64 exp_ent : 1;
  695. u64 rng_rst : 1;
  696. u64 rnm_rst : 1;
  697. u64 rng_en : 1;
  698. u64 ent_en : 1;
  699. #else
  700. u64 ent_en : 1;
  701. u64 rng_en : 1;
  702. u64 rnm_rst : 1;
  703. u64 rng_rst : 1;
  704. u64 exp_ent : 1;
  705. u64 ent_sel : 4;
  706. u64 raz_9_63 : 55;
  707. #endif
  708. } s;
  709. };
  710. /**
  711. * struct bmi_ctl - BMI control register
  712. * @ilk_hdrq_thrsh: Maximum number of header queue locations
  713. * that ILK packets may consume. When the threshold is
  714. * exceeded ILK_XOFF is sent to the BMI_X2P_ARB.
  715. * @nps_hdrq_thrsh: Maximum number of header queue locations
  716. * that NPS packets may consume. When the threshold is
  717. * exceeded NPS_XOFF is sent to the BMI_X2P_ARB.
  718. * @totl_hdrq_thrsh: Maximum number of header queue locations
  719. * that the sum of ILK and NPS packets may consume.
  720. * @ilk_free_thrsh: Maximum number of buffers that ILK packet
  721. * flows may consume before ILK_XOFF is sent to the BMI_X2P_ARB.
  722. * @nps_free_thrsh: Maximum number of buffers that NPS packet
  723. * flows may consume before NPS XOFF is sent to the BMI_X2p_ARB.
  724. * @totl_free_thrsh: Maximum number of buffers that bot ILK and NPS
  725. * packet flows may consume before both NPS_XOFF and ILK_XOFF
  726. * are asserted to the BMI_X2P_ARB.
  727. * @max_pkt_len: Maximum packet length, integral number of 256B
  728. * buffers.
  729. */
  730. union bmi_ctl {
  731. u64 value;
  732. struct {
  733. #if (defined(__BIG_ENDIAN_BITFIELD))
  734. u64 raz_56_63 : 8;
  735. u64 ilk_hdrq_thrsh : 8;
  736. u64 nps_hdrq_thrsh : 8;
  737. u64 totl_hdrq_thrsh : 8;
  738. u64 ilk_free_thrsh : 8;
  739. u64 nps_free_thrsh : 8;
  740. u64 totl_free_thrsh : 8;
  741. u64 max_pkt_len : 8;
  742. #else
  743. u64 max_pkt_len : 8;
  744. u64 totl_free_thrsh : 8;
  745. u64 nps_free_thrsh : 8;
  746. u64 ilk_free_thrsh : 8;
  747. u64 totl_hdrq_thrsh : 8;
  748. u64 nps_hdrq_thrsh : 8;
  749. u64 ilk_hdrq_thrsh : 8;
  750. u64 raz_56_63 : 8;
  751. #endif
  752. } s;
  753. };
  754. /**
  755. * struct bmi_int_ena_w1s - BMI interrupt enable set register
  756. * @ilk_req_oflw: Reads or sets enable for
  757. * BMI_INT[ILK_REQ_OFLW].
  758. * @nps_req_oflw: Reads or sets enable for
  759. * BMI_INT[NPS_REQ_OFLW].
  760. * @fpf_undrrn: Reads or sets enable for
  761. * BMI_INT[FPF_UNDRRN].
  762. * @eop_err_ilk: Reads or sets enable for
  763. * BMI_INT[EOP_ERR_ILK].
  764. * @eop_err_nps: Reads or sets enable for
  765. * BMI_INT[EOP_ERR_NPS].
  766. * @sop_err_ilk: Reads or sets enable for
  767. * BMI_INT[SOP_ERR_ILK].
  768. * @sop_err_nps: Reads or sets enable for
  769. * BMI_INT[SOP_ERR_NPS].
  770. * @pkt_rcv_err_ilk: Reads or sets enable for
  771. * BMI_INT[PKT_RCV_ERR_ILK].
  772. * @pkt_rcv_err_nps: Reads or sets enable for
  773. * BMI_INT[PKT_RCV_ERR_NPS].
  774. * @max_len_err_ilk: Reads or sets enable for
  775. * BMI_INT[MAX_LEN_ERR_ILK].
  776. * @max_len_err_nps: Reads or sets enable for
  777. * BMI_INT[MAX_LEN_ERR_NPS].
  778. */
  779. union bmi_int_ena_w1s {
  780. u64 value;
  781. struct {
  782. #if (defined(__BIG_ENDIAN_BITFIELD))
  783. u64 raz_13_63 : 51;
  784. u64 ilk_req_oflw : 1;
  785. u64 nps_req_oflw : 1;
  786. u64 raz_10 : 1;
  787. u64 raz_9 : 1;
  788. u64 fpf_undrrn : 1;
  789. u64 eop_err_ilk : 1;
  790. u64 eop_err_nps : 1;
  791. u64 sop_err_ilk : 1;
  792. u64 sop_err_nps : 1;
  793. u64 pkt_rcv_err_ilk : 1;
  794. u64 pkt_rcv_err_nps : 1;
  795. u64 max_len_err_ilk : 1;
  796. u64 max_len_err_nps : 1;
  797. #else
  798. u64 max_len_err_nps : 1;
  799. u64 max_len_err_ilk : 1;
  800. u64 pkt_rcv_err_nps : 1;
  801. u64 pkt_rcv_err_ilk : 1;
  802. u64 sop_err_nps : 1;
  803. u64 sop_err_ilk : 1;
  804. u64 eop_err_nps : 1;
  805. u64 eop_err_ilk : 1;
  806. u64 fpf_undrrn : 1;
  807. u64 raz_9 : 1;
  808. u64 raz_10 : 1;
  809. u64 nps_req_oflw : 1;
  810. u64 ilk_req_oflw : 1;
  811. u64 raz_13_63 : 51;
  812. #endif
  813. } s;
  814. };
  815. /**
  816. * struct bmo_ctl2 - BMO Control2 Register
  817. * @arb_sel: Determines P2X Arbitration
  818. * @ilk_buf_thrsh: Maximum number of buffers that the
  819. * ILK packet flows may consume before ILK XOFF is
  820. * asserted to the POM.
  821. * @nps_slc_buf_thrsh: Maximum number of buffers that the
  822. * NPS_SLC packet flow may consume before NPS_SLC XOFF is
  823. * asserted to the POM.
  824. * @nps_uns_buf_thrsh: Maximum number of buffers that the
  825. * NPS_UNS packet flow may consume before NPS_UNS XOFF is
  826. * asserted to the POM.
  827. * @totl_buf_thrsh: Maximum number of buffers that ILK, NPS_UNS and
  828. * NPS_SLC packet flows may consume before NPS_UNS XOFF, NSP_SLC and
  829. * ILK_XOFF are all asserted POM.
  830. */
union bmo_ctl2 {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 arb_sel : 1;		/* bit 63 */
		u64 raz_32_62 : 31;		/* bits 62:32, reserved */
		u64 ilk_buf_thrsh : 8;		/* bits 31:24 */
		u64 nps_slc_buf_thrsh : 8;	/* bits 23:16 */
		u64 nps_uns_buf_thrsh : 8;	/* bits 15:8 */
		u64 totl_buf_thrsh : 8;		/* bits 7:0 */
#else
		u64 totl_buf_thrsh : 8;		/* bits 7:0 */
		u64 nps_uns_buf_thrsh : 8;	/* bits 15:8 */
		u64 nps_slc_buf_thrsh : 8;	/* bits 23:16 */
		u64 ilk_buf_thrsh : 8;		/* bits 31:24 */
		u64 raz_32_62 : 31;		/* bits 62:32, reserved */
		u64 arb_sel : 1;		/* bit 63 */
#endif
	} s;
};
  851. /**
  852. * struct pom_int_ena_w1s - POM interrupt enable set register
  853. * @illegal_intf: Reads or sets enable for POM_INT[ILLEGAL_INTF].
  854. * @illegal_dport: Reads or sets enable for POM_INT[ILLEGAL_DPORT].
  855. */
union pom_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 60;			/* bits 63:4, reserved */
		u64 illegal_intf : 1;		/* bit 3 */
		u64 illegal_dport : 1;		/* bit 2 */
		u64 raz1 : 1;			/* bit 1, reserved */
		u64 raz0 : 1;			/* bit 0, reserved */
#else
		u64 raz0 : 1;			/* bit 0, reserved */
		u64 raz1 : 1;			/* bit 1, reserved */
		u64 illegal_dport : 1;		/* bit 2 */
		u64 illegal_intf : 1;		/* bit 3 */
		u64 raz2 : 60;			/* bits 63:4, reserved */
#endif
	} s;
};
  874. /**
  875. * struct lbc_inval_ctl - LBC invalidation control register
  876. * @wait_timer: Wait timer for wait state. [WAIT_TIMER] must
  877. * always be written with its reset value.
  878. * @cam_inval_start: Software should write [CAM_INVAL_START]=1
  879. * to initiate an LBC cache invalidation. After this, software
  880. * should read LBC_INVAL_STATUS until LBC_INVAL_STATUS[DONE] is set.
 * LBC hardware clears [CAM_INVAL_START] before software can
 * observe LBC_INVAL_STATUS[DONE] to be set
  883. */
union lbc_inval_ctl {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 48;			/* bits 63:16, reserved */
		u64 wait_timer : 8;		/* bits 15:8 */
		u64 raz1 : 6;			/* bits 7:2, reserved */
		u64 cam_inval_start : 1;	/* bit 1 */
		u64 raz0 : 1;			/* bit 0, reserved */
#else
		u64 raz0 : 1;			/* bit 0, reserved */
		u64 cam_inval_start : 1;	/* bit 1 */
		u64 raz1 : 6;			/* bits 7:2, reserved */
		u64 wait_timer : 8;		/* bits 15:8 */
		u64 raz2 : 48;			/* bits 63:16, reserved */
#endif
	} s;
};
  902. /**
  903. * struct lbc_int_ena_w1s - LBC interrupt enable set register
  904. * @cam_hard_err: Reads or sets enable for LBC_INT[CAM_HARD_ERR].
  905. * @cam_inval_abort: Reads or sets enable for LBC_INT[CAM_INVAL_ABORT].
  906. * @over_fetch_err: Reads or sets enable for LBC_INT[OVER_FETCH_ERR].
  907. * @cache_line_to_err: Reads or sets enable for
  908. * LBC_INT[CACHE_LINE_TO_ERR].
  909. * @cam_soft_err: Reads or sets enable for
  910. * LBC_INT[CAM_SOFT_ERR].
  911. * @dma_rd_err: Reads or sets enable for
  912. * LBC_INT[DMA_RD_ERR].
  913. */
union lbc_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_10_63 : 54;		/* bits 63:10, reserved */
		u64 cam_hard_err : 1;		/* bit 9 */
		u64 cam_inval_abort : 1;	/* bit 8 */
		u64 over_fetch_err : 1;		/* bit 7 */
		u64 cache_line_to_err : 1;	/* bit 6 */
		u64 raz_2_5 : 4;		/* bits 5:2, reserved */
		u64 cam_soft_err : 1;		/* bit 1 */
		u64 dma_rd_err : 1;		/* bit 0 */
#else
		u64 dma_rd_err : 1;		/* bit 0 */
		u64 cam_soft_err : 1;		/* bit 1 */
		u64 raz_2_5 : 4;		/* bits 5:2, reserved */
		u64 cache_line_to_err : 1;	/* bit 6 */
		u64 over_fetch_err : 1;		/* bit 7 */
		u64 cam_inval_abort : 1;	/* bit 8 */
		u64 cam_hard_err : 1;		/* bit 9 */
		u64 raz_10_63 : 54;		/* bits 63:10, reserved */
#endif
	} s;
};
  938. /**
  939. * struct lbc_int - LBC interrupt summary register
  940. * @cam_hard_err: indicates a fatal hardware error.
  941. * It requires system reset.
  942. * When [CAM_HARD_ERR] is set, LBC stops logging any new information in
  943. * LBC_POM_MISS_INFO_LOG,
  944. * LBC_POM_MISS_ADDR_LOG,
  945. * LBC_EFL_MISS_INFO_LOG, and
  946. * LBC_EFL_MISS_ADDR_LOG.
  947. * Software should sample them.
  948. * @cam_inval_abort: indicates a fatal hardware error.
  949. * System reset is required.
  950. * @over_fetch_err: indicates a fatal hardware error
  951. * System reset is required
  952. * @cache_line_to_err: is a debug feature.
  953. * This timeout interrupt bit tells the software that
  954. * a cacheline in LBC has non-zero usage and the context
  955. * has not been used for greater than the
  956. * LBC_TO_CNT[TO_CNT] time interval.
  957. * @sbe: Memory SBE error. This is recoverable via ECC.
  958. * See LBC_ECC_INT for more details.
  959. * @dbe: Memory DBE error. This is a fatal and requires a
  960. * system reset.
  961. * @pref_dat_len_mismatch_err: Summary bit for context length
  962. * mismatch errors.
 * @rd_dat_len_mismatch_err: Summary bit for SE read data length
 * greater than data prefetch length errors.
  965. * @cam_soft_err: is recoverable. Software must complete a
  966. * LBC_INVAL_CTL[CAM_INVAL_START] invalidation sequence and
  967. * then clear [CAM_SOFT_ERR].
 * @dma_rd_err: A context prefetch read of host memory returned with
 * a read error.
  970. */
union lbc_int {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_10_63 : 54;			/* bits 63:10, reserved */
		u64 cam_hard_err : 1;			/* bit 9 */
		u64 cam_inval_abort : 1;		/* bit 8 */
		u64 over_fetch_err : 1;			/* bit 7 */
		u64 cache_line_to_err : 1;		/* bit 6 */
		u64 sbe : 1;				/* bit 5 */
		u64 dbe : 1;				/* bit 4 */
		u64 pref_dat_len_mismatch_err : 1;	/* bit 3 */
		u64 rd_dat_len_mismatch_err : 1;	/* bit 2 */
		u64 cam_soft_err : 1;			/* bit 1 */
		u64 dma_rd_err : 1;			/* bit 0 */
#else
		u64 dma_rd_err : 1;			/* bit 0 */
		u64 cam_soft_err : 1;			/* bit 1 */
		u64 rd_dat_len_mismatch_err : 1;	/* bit 2 */
		u64 pref_dat_len_mismatch_err : 1;	/* bit 3 */
		u64 dbe : 1;				/* bit 4 */
		u64 sbe : 1;				/* bit 5 */
		u64 cache_line_to_err : 1;		/* bit 6 */
		u64 over_fetch_err : 1;			/* bit 7 */
		u64 cam_inval_abort : 1;		/* bit 8 */
		u64 cam_hard_err : 1;			/* bit 9 */
		u64 raz_10_63 : 54;			/* bits 63:10, reserved */
#endif
	} s;
};
  1001. /**
  1002. * struct lbc_inval_status: LBC Invalidation status register
  1003. * @cam_clean_entry_complete_cnt: The number of entries that are
  1004. * cleaned up successfully.
  1005. * @cam_clean_entry_cnt: The number of entries that have the CAM
  1006. * inval command issued.
  1007. * @cam_inval_state: cam invalidation FSM state
  1008. * @cam_inval_abort: cam invalidation abort
  1009. * @cam_rst_rdy: lbc_cam reset ready
  1010. * @done: LBC clears [DONE] when
  1011. * LBC_INVAL_CTL[CAM_INVAL_START] is written with a one,
  1012. * and sets [DONE] when it completes the invalidation
  1013. * sequence.
  1014. */
union lbc_inval_status {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz3 : 23;				/* bits 63:41, reserved */
		u64 cam_clean_entry_complete_cnt : 9;	/* bits 40:32 */
		u64 raz2 : 7;				/* bits 31:25, reserved */
		u64 cam_clean_entry_cnt : 9;		/* bits 24:16 */
		u64 raz1 : 5;				/* bits 15:11, reserved */
		u64 cam_inval_state : 3;		/* bits 10:8 */
		u64 raz0 : 5;				/* bits 7:3, reserved */
		u64 cam_inval_abort : 1;		/* bit 2 */
		u64 cam_rst_rdy : 1;			/* bit 1 */
		u64 done : 1;				/* bit 0 */
#else
		u64 done : 1;				/* bit 0 */
		u64 cam_rst_rdy : 1;			/* bit 1 */
		u64 cam_inval_abort : 1;		/* bit 2 */
		u64 raz0 : 5;				/* bits 7:3, reserved */
		u64 cam_inval_state : 3;		/* bits 10:8 */
		u64 raz1 : 5;				/* bits 15:11, reserved */
		u64 cam_clean_entry_cnt : 9;		/* bits 24:16 */
		u64 raz2 : 7;				/* bits 31:25, reserved */
		u64 cam_clean_entry_complete_cnt : 9;	/* bits 40:32 */
		u64 raz3 : 23;				/* bits 63:41, reserved */
#endif
	} s;
};
  1043. #endif /* __NITROX_CSR_H */