// zip_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "zip.h"

#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250

#define HZIP_QUEUE_NUM_V1 4096

#define HZIP_CLOCK_GATE_CTRL 0x301004
#define HZIP_DECOMP_CHECK_ENABLE BIT(16)
#define HZIP_FSM_MAX_CNT 0x301008

#define HZIP_PORT_ARCA_CHE_0 0x301040
#define HZIP_PORT_ARCA_CHE_1 0x301044
#define HZIP_PORT_AWCA_CHE_0 0x301060
#define HZIP_PORT_AWCA_CHE_1 0x301064
#define HZIP_CACHE_ALL_EN 0xffffffff

#define HZIP_BD_RUSER_32_63 0x301110
#define HZIP_SGL_RUSER_32_63 0x30111c
#define HZIP_DATA_RUSER_32_63 0x301128
#define HZIP_DATA_WUSER_32_63 0x301134
#define HZIP_BD_WUSER_32_63 0x301140
#define HZIP_QM_IDEL_STATUS 0x3040e4

#define HZIP_CORE_DFX_BASE 0x301000
#define HZIP_CORE_DFX_DECOMP_BASE 0x304000
#define HZIP_CORE_DFX_COMP_0 0x302000
#define HZIP_CORE_DFX_COMP_1 0x303000
#define HZIP_CORE_DFX_DECOMP_0 0x304000
#define HZIP_CORE_DFX_DECOMP_1 0x305000
#define HZIP_CORE_DFX_DECOMP_2 0x306000
#define HZIP_CORE_DFX_DECOMP_3 0x307000
#define HZIP_CORE_DFX_DECOMP_4 0x308000
#define HZIP_CORE_DFX_DECOMP_5 0x309000
#define HZIP_CORE_REGS_BASE_LEN 0xB0
#define HZIP_CORE_REGS_DFX_LEN 0x28
#define HZIP_CORE_ADDR_INTRVL 0x1000

#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
#define HZIP_CORE_INT_SET 0x3010A8
#define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0
#define HZIP_OOO_SHUTDOWN_SEL 0x30120C
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)

#define HZIP_SQE_SIZE 128
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
#define HZIP_CTX_Q_NUM_DEF 2

#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)

#define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)

#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48

#define HZIP_CNT_CLR_CE_EN BIT(0)
#define HZIP_RO_CNT_CLR_CE_EN BIT(2)
#define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \
			       HZIP_RO_CNT_CLR_CE_EN)

#define HZIP_PREFETCH_CFG 0x3011B0
#define HZIP_SVA_TRANS 0x3011C4
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
#define HZIP_SHAPER_RATE_COMPRESS 750
#define HZIP_SHAPER_RATE_DECOMPRESS 140
#define HZIP_DELAY_1_US 1
#define HZIP_POLL_TIMEOUT_US 1000

/* clock gating */
#define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
#define HZIP_PEH_CFG_AUTO_GATE_EN BIT(0)
#define HZIP_CORE_GATED_EN GENMASK(15, 8)
#define HZIP_CORE_GATED_OOO_EN BIT(29)
#define HZIP_CLOCK_GATED_EN (HZIP_CORE_GATED_EN | \
			     HZIP_CORE_GATED_OOO_EN)

/* zip comp high performance */
#define HZIP_HIGH_PERF_OFFSET 0x301208

enum {
	HZIP_HIGH_COMP_RATE,
	HZIP_HIGH_COMP_PERF,
};

static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;

struct hisi_zip_hw_error {
	u32 int_msk;
	const char *msg;
};

struct zip_dfx_item {
	const char *name;
	u32 offset;
};

static const struct qm_dev_alg zip_dev_algs[] = { {
		.alg_msk = HZIP_ALG_ZLIB_BIT,
		.alg = "zlib\n",
	}, {
		.alg_msk = HZIP_ALG_GZIP_BIT,
		.alg = "gzip\n",
	}, {
		.alg_msk = HZIP_ALG_DEFLATE_BIT,
		.alg = "deflate\n",
	}, {
		.alg_msk = HZIP_ALG_LZ77_BIT,
		.alg = "lz77_zstd\n",
	},
};

static struct hisi_qm_list zip_devices = {
	.register_to_crypto = hisi_zip_register_to_crypto,
	.unregister_from_crypto = hisi_zip_unregister_from_crypto,
};

static struct zip_dfx_item zip_dfx_files[] = {
	{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
	{"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
};

static const struct hisi_zip_hw_error zip_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "zip_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
	{ .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
	{ .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
	{ .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
	{ .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
	{ .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
	{ .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
	{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
	{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
	{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
	{ .int_msk = BIT(11), .msg = "zip_axi_poison_err" },
	{ .int_msk = BIT(12), .msg = "zip_sva_err" },
	{ /* sentinel */ }
};

enum ctrl_debug_file_index {
	HZIP_CLEAR_ENABLE,
	HZIP_DEBUG_FILE_NUM,
};

static const char * const ctrl_debug_file_name[] = {
	[HZIP_CLEAR_ENABLE] = "clear_enable",
};

struct ctrl_debug_file {
	enum ctrl_debug_file_index index;
	spinlock_t lock;
	struct hisi_zip_ctrl *ctrl;
};
/*
 * One ZIP controller has one PF and multiple VFs; the global configuration
 * owned by the PF is kept in this structure.
 *
 * It is only relevant for the PF.
 */
struct hisi_zip_ctrl {
	struct hisi_zip *hisi_zip;
	struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};

enum zip_cap_type {
	ZIP_QM_NFE_MASK_CAP = 0x0,
	ZIP_QM_RESET_MASK_CAP,
	ZIP_QM_OOO_SHUTDOWN_MASK_CAP,
	ZIP_QM_CE_MASK_CAP,
	ZIP_NFE_MASK_CAP,
	ZIP_RESET_MASK_CAP,
	ZIP_OOO_SHUTDOWN_MASK_CAP,
	ZIP_CE_MASK_CAP,
	ZIP_CLUSTER_NUM_CAP,
	ZIP_CORE_TYPE_NUM_CAP,
	ZIP_CORE_NUM_CAP,
	ZIP_CLUSTER_COMP_NUM_CAP,
	ZIP_CLUSTER_DECOMP_NUM_CAP,
	ZIP_DECOMP_ENABLE_BITMAP,
	ZIP_COMP_ENABLE_BITMAP,
	ZIP_DRV_ALG_BITMAP,
	ZIP_DEV_ALG_BITMAP,
	ZIP_CORE1_ALG_BITMAP,
	ZIP_CORE2_ALG_BITMAP,
	ZIP_CORE3_ALG_BITMAP,
	ZIP_CORE4_ALG_BITMAP,
	ZIP_CORE5_ALG_BITMAP,
	ZIP_CAP_MAX
};

static struct hisi_qm_cap_info zip_basic_cap_info[] = {
	{ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77},
	{ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77},
	{ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
	{ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE},
	{ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE},
	{ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE},
	{ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2},
	{ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5},
	{ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2},
	{ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
	{ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
	{ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
	{ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x0, 0x30},
	{ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0x3F},
	{ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
	{ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
	{ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
};
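
/*
 * How the table above is consumed (a sketch, inferred from the callers in
 * this file rather than from hisi_qm internals): each row is
 * {type, offset, shift, mask, v1 default, v2 default, v3 default}, and
 * hisi_qm_get_hw_info() recovers a capability roughly as
 *
 *	val = readl(qm->io_base + offset);
 *	return (val >> shift) & mask;
 *
 * falling back to the per-version default column on older hardware. E.g.
 * ZIP_CORE_NUM_CAP lives at bits [23:16] of register 0x313C.
 */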

enum zip_pre_store_cap_idx {
	ZIP_CORE_NUM_CAP_IDX = 0x0,
	ZIP_CLUSTER_COMP_NUM_CAP_IDX,
	ZIP_CLUSTER_DECOMP_NUM_CAP_IDX,
	ZIP_DECOMP_ENABLE_BITMAP_IDX,
	ZIP_COMP_ENABLE_BITMAP_IDX,
	ZIP_DRV_ALG_BITMAP_IDX,
	ZIP_DEV_ALG_BITMAP_IDX,
};

static const u32 zip_pre_store_caps[] = {
	ZIP_CORE_NUM_CAP,
	ZIP_CLUSTER_COMP_NUM_CAP,
	ZIP_CLUSTER_DECOMP_NUM_CAP,
	ZIP_DECOMP_ENABLE_BITMAP,
	ZIP_COMP_ENABLE_BITMAP,
	ZIP_DRV_ALG_BITMAP,
	ZIP_DEV_ALG_BITMAP,
};
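
/*
 * The order of zip_pre_store_caps[] must match enum zip_pre_store_cap_idx
 * above: zip_pre_store_cap_reg() below fills dev_cap_table[] in array
 * order, and the ZIP_*_IDX constants are then used to index that table.
 */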

static const struct debugfs_reg32 hzip_dfx_regs[] = {
	{"HZIP_GET_BD_NUM ", 0x00},
	{"HZIP_GET_RIGHT_BD ", 0x04},
	{"HZIP_GET_ERROR_BD ", 0x08},
	{"HZIP_DONE_BD_NUM ", 0x0c},
	{"HZIP_WORK_CYCLE ", 0x10},
	{"HZIP_IDLE_CYCLE ", 0x18},
	{"HZIP_MAX_DELAY ", 0x20},
	{"HZIP_MIN_DELAY ", 0x24},
	{"HZIP_AVG_DELAY ", 0x28},
	{"HZIP_MEM_VISIBLE_DATA ", 0x30},
	{"HZIP_MEM_VISIBLE_ADDR ", 0x34},
	{"HZIP_CONSUMED_BYTE ", 0x38},
	{"HZIP_PRODUCED_BYTE ", 0x40},
	{"HZIP_COMP_INF ", 0x70},
	{"HZIP_PRE_OUT ", 0x78},
	{"HZIP_BD_RD ", 0x7c},
	{"HZIP_BD_WR ", 0x80},
	{"HZIP_GET_BD_AXI_ERR_NUM ", 0x84},
	{"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88},
	{"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8c},
	{"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94},
	{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9c},
};

static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
	{"HZIP_CLOCK_GATE_CTRL ", 0x301004},
	{"HZIP_CORE_INT_RAS_CE_ENB ", 0x301160},
	{"HZIP_CORE_INT_RAS_NFE_ENB ", 0x301164},
	{"HZIP_CORE_INT_RAS_FE_ENB ", 0x301168},
	{"HZIP_UNCOM_ERR_RAS_CTRL ", 0x30116C},
};

static const struct debugfs_reg32 hzip_dump_dfx_regs[] = {
	{"HZIP_GET_BD_NUM ", 0x00},
	{"HZIP_GET_RIGHT_BD ", 0x04},
	{"HZIP_GET_ERROR_BD ", 0x08},
	{"HZIP_DONE_BD_NUM ", 0x0c},
	{"HZIP_MAX_DELAY ", 0x20},
};

/* define the ZIP's dfx regs region and region length */
static struct dfx_diff_registers hzip_diff_regs[] = {
	{
		.reg_offset = HZIP_CORE_DFX_BASE,
		.reg_len = HZIP_CORE_REGS_BASE_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_COMP_0,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_COMP_1,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_0,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_1,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_2,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_3,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_4,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_5,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	},
};

static int hzip_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(hzip_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs);

static int perf_mode_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	u32 n;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || (n != HZIP_HIGH_COMP_PERF &&
			 n != HZIP_HIGH_COMP_RATE))
		return -EINVAL;

	return param_set_int(val, kp);
}

static const struct kernel_param_ops zip_com_perf_ops = {
	.set = perf_mode_set,
	.get = param_get_int,
};

/*
 * perf_mode = 0 means enable high compression rate mode,
 * perf_mode = 1 means enable high compression performance mode.
 * These two modes only apply to the compression direction.
 */
static u32 perf_mode = HZIP_HIGH_COMP_RATE;
module_param_cb(perf_mode, &zip_com_perf_ops, &perf_mode, 0444);
MODULE_PARM_DESC(perf_mode, "ZIP high perf mode 0(default), 1(enable)");

static const struct kernel_param_ops zip_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means zip registers to crypto only,
 * uacce_mode = 1 means zip registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static bool pf_q_num_flag;
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
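
/*
 * Example module load using the parameters above (illustrative values,
 * not a recommendation; all four are read-only after load, 0444):
 *
 *	modprobe hisi_zip uacce_mode=1 perf_mode=1 pf_q_num=256 vfs_num=2
 */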

static const struct pci_device_id hisi_zip_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);

int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
{
	if (node == NUMA_NO_NODE)
		node = cpu_to_node(raw_smp_processor_id());

	return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
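
/*
 * Typical use (a sketch; the in-driver crypto code is the real caller):
 * allocate a context's queue pairs close to the caller's NUMA node, e.g.
 *
 *	struct hisi_qp *qps[HZIP_CTX_Q_NUM_DEF] = {};
 *	int ret = zip_create_qps(qps, HZIP_CTX_Q_NUM_DEF, dev_to_node(dev));
 */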

bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
{
	u32 cap_val;

	cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val;
	if ((alg & cap_val) == alg)
		return true;

	return false;
}
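
/*
 * Example check (hypothetical caller), using the HZIP_ALG_*_BIT masks
 * defined at the top of this file:
 *
 *	if (hisi_zip_alg_support(qm, HZIP_ALG_ZLIB_BIT))
 *		register_zlib();	// hypothetical helper
 */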

static int hisi_zip_set_high_perf(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
	if (perf_mode == HZIP_HIGH_COMP_PERF)
		val |= HZIP_HIGH_COMP_PERF;
	else
		val &= ~HZIP_HIGH_COMP_PERF;

	/* Set perf mode */
	writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
					 val, val == perf_mode, HZIP_DELAY_1_US,
					 HZIP_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to set perf mode\n");

	return ret;
}

static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
	val &= HZIP_PREFETCH_ENABLE;
	writel(val, qm->io_base + HZIP_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
					 val, !(val & HZIP_SVA_PREFETCH_DISABLE),
					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
	val |= HZIP_SVA_PREFETCH_DISABLE;
	writel(val, qm->io_base + HZIP_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
					 val, !(val & HZIP_SVA_DISABLE_READY),
					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}

static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL);
	val |= HZIP_CLOCK_GATED_EN;
	writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL);

	val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
	val |= HZIP_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
}

static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
	void __iomem *base = qm->io_base;
	u32 dcomp_bm, comp_bm;

	/* qm user domain */
	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);

	/* cache */
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);

	/* user domain configurations */
	writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);

	if (qm->use_sva && qm->ver == QM_HW_V2) {
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_SGL_RUSER_32_63);
	} else {
		writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
	}

	/* let's open all compression/decompression cores */
	dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
	comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val;
	writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);

	hisi_zip_enable_clock_gate(qm);

	return 0;
}

static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
	if (enable) {
		val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
		val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
					   ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	} else {
		val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}

static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
	u32 nfe, ce;

	if (qm->ver == QM_HW_V1) {
		writel(HZIP_CORE_INT_MASK_ALL,
		       qm->io_base + HZIP_CORE_INT_MASK_REG);
		dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
		return;
	}

	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
	ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
	/* clear any pending ZIP hw error source */
	writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);

	/* configure error type */
	writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
	writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
	writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	hisi_zip_master_ooo_ctrl(qm, true);

	/* enable ZIP hw error interrupts */
	writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
}

static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
	u32 nfe, ce;

	/* disable ZIP hw error interrupts */
	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
	ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
	writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);

	hisi_zip_master_ooo_ctrl(qm, false);
}

static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
{
	struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;

	return &hisi_zip->qm;
}

static u32 clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
		     HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
}

static int clear_enable_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
	       ~HZIP_SOFT_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
					size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[HZIP_BUF_SIZE];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CLEAR_ENABLE:
		val = clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
					 const char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[HZIP_BUF_SIZE];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HZIP_BUF_SIZE)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	ret = kstrtoul(tbuf, 0, &val);
	if (ret)
		return ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CLEAR_ENABLE:
		ret = clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hisi_zip_ctrl_debug_read,
	.write = hisi_zip_ctrl_debug_write,
};
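
/*
 * With debugfs mounted in the usual place, the "clear_enable" file backed
 * by these fops (created in hisi_zip_ctrl_debug_init() below) can be
 * exercised as, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/hisi_zip/<pci-bdf>/clear_enable
 *	cat /sys/kernel/debug/hisi_zip/<pci-bdf>/clear_enable
 */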

static int zip_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

static int zip_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
			 zip_debugfs_atomic64_set, "%llu\n");

static int hisi_zip_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);

static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num)
{
	u32 zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;

	if (core_num < zip_comp_core_num)
		return qm->io_base + HZIP_CORE_DFX_BASE +
			(core_num + 1) * HZIP_CORE_ADDR_INTRVL;

	return qm->io_base + HZIP_CORE_DFX_DECOMP_BASE +
		(core_num - zip_comp_core_num) * HZIP_CORE_ADDR_INTRVL;
}
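
/*
 * Worked example of the address math above: compression core 0 maps to
 * HZIP_CORE_DFX_BASE + 1 * HZIP_CORE_ADDR_INTRVL = 0x302000
 * (HZIP_CORE_DFX_COMP_0); the first decompression core maps to
 * HZIP_CORE_DFX_DECOMP_BASE + 0 = 0x304000 (HZIP_CORE_DFX_DECOMP_0).
 */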

static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
	u32 zip_core_num, zip_comp_core_num;
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	char buf[HZIP_BUF_SIZE];
	int i;

	zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
	zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;

	for (i = 0; i < zip_core_num; i++) {
		if (i < zip_comp_core_num)
			scnprintf(buf, sizeof(buf), "comp_core%d", i);
		else
			scnprintf(buf, sizeof(buf), "decomp_core%d",
				  i - zip_comp_core_num);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOENT;

		regset->regs = hzip_dfx_regs;
		regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
		regset->base = get_zip_core_addr(qm, i);
		regset->dev = dev;

		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
		debugfs_create_file("regs", 0444, tmp_d, regset,
				    &hisi_zip_regs_fops);
	}

	return 0;
}
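
/*
 * Resulting debugfs layout (sketch): one directory per core under the
 * device's debug root, each holding a read-only "regs" dump, e.g.
 *
 *	comp_core0/regs, comp_core1/regs, decomp_core0/regs, ...
 */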

static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
	struct hisi_zip_dfx *dfx = &zip->dfx;
	struct dentry *tmp_dir;
	void *data;
	int i;

	tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
	for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
		debugfs_create_file(zip_dfx_files[i].name,
				    0644, tmp_dir, data,
				    &zip_atomic64_ops);
	}

	if (qm->fun_type == QM_HW_PF && hzip_regs)
		debugfs_create_file("diff_regs", 0444, tmp_dir,
				    qm, &hzip_diff_regs_fops);
}
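
/*
 * This creates the zip_dfx/ directory holding the atomic64 counters from
 * zip_dfx_files[] (send_cnt, recv_cnt, ...) plus, on the PF, "diff_regs".
 * E.g. (path assumed, with debugfs mounted conventionally):
 *
 *	cat /sys/kernel/debug/hisi_zip/<pci-bdf>/zip_dfx/send_cnt
 */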

static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
{
	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
	int i;

	for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) {
		spin_lock_init(&zip->ctrl->files[i].lock);
		zip->ctrl->files[i].ctrl = zip->ctrl;
		zip->ctrl->files[i].index = i;

		debugfs_create_file(ctrl_debug_file_name[i], 0600,
				    qm->debug.debug_root,
				    zip->ctrl->files + i,
				    &ctrl_debug_fops);
	}

	return hisi_zip_core_debug_init(qm);
}

static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init ZIP diff regs!\n");
		return ret;
	}

	qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hzip_debugfs_root);
	hisi_qm_debug_init(qm);

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_ctrl_debug_init(qm);
		if (ret)
			goto debugfs_remove;
	}

	hisi_zip_dfx_debug_init(qm);

	return 0;

debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
	return ret;
}

/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
	u32 zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
	int i, j;

	/* enable register read_clear bit */
	writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
	for (i = 0; i < zip_core_num; i++)
		for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++)
			readl(get_zip_core_addr(qm, i) +
			      hzip_dfx_regs[j].offset);

	/* disable register read_clear bit */
	writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);

	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));

	if (qm->fun_type == QM_HW_PF) {
		hisi_zip_debug_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
	}
}

static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
{
	int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	u32 zip_core_num;
	int i, j, idx;

	zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;

	debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
				    sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < com_dfx_regs_num; i++) {
		io_base = qm->io_base + hzip_com_dfx_regs[i].offset;
		debug->last_words[i] = readl_relaxed(io_base);
	}

	for (i = 0; i < zip_core_num; i++) {
		io_base = get_zip_core_addr(qm, i);
		for (j = 0; j < core_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
					io_base + hzip_dump_dfx_regs[j].offset);
		}
	}

	return 0;
}
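
/*
 * last_words layout: the ARRAY_SIZE(hzip_com_dfx_regs) common registers
 * come first, then core_dfx_regs_num entries per core. With 5 of each,
 * core i = 2, reg j = 3 lands at idx = 5 + 2 * 5 + 3 = 18.
 */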

static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
{
	int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
	u32 zip_core_num, zip_comp_core_num;
	struct qm_debug *debug = &qm->debug;
	char buf[HZIP_BUF_SIZE];
	void __iomem *base;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
				 hzip_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
	zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;

	for (i = 0; i < zip_core_num; i++) {
		if (i < zip_comp_core_num)
			scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
		else
			scnprintf(buf, sizeof(buf), "Decomp_core-%d",
				  i - zip_comp_core_num);
		base = get_zip_core_addr(qm, i);

		pci_info(qm->pdev, "==>%s:\n", buf);
		/* dump last word for dfx regs during control resetting */
		for (j = 0; j < core_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
			val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
			if (debug->last_words[idx] != val)
				pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
					 hzip_dump_dfx_regs[j].name,
					 debug->last_words[idx], val);
		}
	}
}

static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hisi_zip_hw_error *err = zip_hw_error;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (err->msg) {
		if (err->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				err->msg, err->int_msk);

			if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
				err_val = readl(qm->io_base +
						HZIP_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
					((err_val >>
					HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
			}
		}
		err++;
	}
}

static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}

static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}

static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	u32 nfe_mask;

	nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
	writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}

static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}

static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
{
	u32 nfe_enb;

	/* Disable ECC Mbit error report. */
	nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
	writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* Inject zip ECC Mbit error to block master ooo. */
	writel(HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_SET);
}

static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = hisi_zip_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		hisi_zip_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			hisi_zip_disable_error_report(qm, err_status);
			return ACC_ERR_NEED_RESET;
		}
		hisi_zip_clear_hw_err_status(qm, err_status);
	}

	return ACC_ERR_RECOVERED;
}

static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
	err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
	err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
					    ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
	err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
							 ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
							  ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
						      ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
						       ZIP_RESET_MASK_CAP, qm->cap_ver);
	err_info->msi_wr_port = HZIP_WR_PORT;
	err_info->acpi_rst = "ZRST";
}

static const struct hisi_qm_err_ini hisi_zip_err_ini = {
	.hw_init = hisi_zip_set_user_domain_and_cache,
	.hw_err_enable = hisi_zip_hw_error_enable,
	.hw_err_disable = hisi_zip_hw_error_disable,
	.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
	.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
	.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
	.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
	.open_sva_prefetch = hisi_zip_open_sva_prefetch,
	.close_sva_prefetch = hisi_zip_close_sva_prefetch,
	.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
	.err_info_init = hisi_zip_err_info_init,
	.get_err_result = hisi_zip_get_err_result,
};

static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct hisi_zip_ctrl *ctrl;
	int ret;

	ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	hisi_zip->ctrl = ctrl;
	ctrl->hisi_zip = hisi_zip;

	ret = hisi_zip_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	ret = hisi_zip_set_high_perf(qm);
	if (ret)
		return ret;

	hisi_zip_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	hisi_zip_debug_regs_clear(qm);

	ret = hisi_zip_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int zip_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *zip_cap;
	struct pci_dev *pdev = qm->pdev;
	size_t i, size;

	size = ARRAY_SIZE(zip_pre_store_caps);
	zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
	if (!zip_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		zip_cap[i].type = zip_pre_store_caps[i];
		zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
				     zip_pre_store_caps[i], qm->cap_ver);
	}

	qm->cap_tables.dev_cap_table = zip_cap;

	return 0;
}

static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->mode = uacce_mode;
	qm->sqe_size = HZIP_SQE_SIZE;
	qm->dev_name = hisi_zip_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_ZIP_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &zip_devices;
		qm->err_ini = &hisi_zip_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * There is no way to get the qm configuration from inside a
		 * VM on v1 hardware, so force the PF to use HZIP_PF_DEF_Q_NUM
		 * and allow only one VF on v1 hardware.
		 *
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = HZIP_PF_DEF_Q_NUM;
		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to init zip qm configures!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = zip_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val;
	ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
	if (ret) {
		pci_err(qm->pdev, "Failed to set zip algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}

static void hisi_zip_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
	u32 type_rate = HZIP_SHAPER_RATE_COMPRESS;
	struct hisi_qm *qm = &hisi_zip->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_pf_probe_init(hisi_zip);
		if (ret)
			return ret;

		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;

			/* ZIP needs to enable shaper type 1 as well */
			type_rate |= HZIP_SHAPER_RATE_DECOMPRESS << QM_SHAPER_TYPE1_OFFSET;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void hisi_zip_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	hisi_zip_show_last_regs_uninit(qm);
	hisi_zip_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}

static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_zip *hisi_zip;
	struct hisi_qm *qm;
	int ret;

	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
	if (!hisi_zip)
		return -ENOMEM;

	qm = &hisi_zip->qm;

	ret = hisi_zip_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
		return ret;
	}

	ret = hisi_zip_probe_init(hisi_zip);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_probe_uninit;

	ret = hisi_zip_debugfs_init(qm);
	if (ret)
		pci_err(pdev, "failed to init debugfs (%d)!\n", ret);

	hisi_qm_add_list(qm, &zip_devices);
	ret = hisi_qm_alg_register(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);
	if (ret < 0) {
		pci_err(pdev, "failed to register driver to crypto!\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_qm_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_qm_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_qm_alg_unregister:
	hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);

err_qm_del_list:
	hisi_qm_del_list(qm, &zip_devices);
	hisi_zip_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_probe_uninit:
	hisi_zip_probe_uninit(qm);

err_qm_uninit:
	hisi_zip_qm_uninit(qm);

	return ret;
}

static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &zip_devices);
	hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);
	hisi_qm_del_list(qm, &zip_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hisi_zip_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
	hisi_zip_probe_uninit(qm);
	hisi_zip_qm_uninit(qm);
}

static const struct dev_pm_ops hisi_zip_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers hisi_zip_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver hisi_zip_pci_driver = {
	.name = "hisi_zip",
	.id_table = hisi_zip_dev_ids,
	.probe = hisi_zip_probe,
	.remove = hisi_zip_remove,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
				hisi_qm_sriov_configure : NULL,
	.err_handler = &hisi_zip_err_handler,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &hisi_zip_pm_ops,
};

struct pci_driver *hisi_zip_get_pf_driver(void)
{
	return &hisi_zip_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_zip_get_pf_driver);

static void hisi_zip_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
}

static void hisi_zip_unregister_debugfs(void)
{
	debugfs_remove_recursive(hzip_debugfs_root);
}

static int __init hisi_zip_init(void)
{
	int ret;

	hisi_qm_init_list(&zip_devices);
	hisi_zip_register_debugfs();

	ret = pci_register_driver(&hisi_zip_pci_driver);
	if (ret < 0) {
		hisi_zip_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
	}

	return ret;
}

static void __exit hisi_zip_exit(void)
{
	pci_unregister_driver(&hisi_zip_pci_driver);
	hisi_zip_unregister_debugfs();
}

module_init(hisi_zip_init);
module_exit(hisi_zip_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");