sec_main.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF	0xa255

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM			16
#define SEC_ECC_MASH			0xFF
#define SEC_CORE_INT_DISABLE		0x0

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_OOO_SHUTDOWN_SEL		0x301014
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

/* clock gating */
#define SEC_CONTROL_REG			0x301200
#define SEC_DYNAMIC_GATE_REG		0x30121c
#define SEC_CORE_AUTO_GATE		0x30212c
#define SEC_DYNAMIC_GATE_EN		0x7fff
#define SEC_CORE_AUTO_GATE_EN		GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))

#define SEC_TRNG_EN_SHIFT		8
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					SEC_USER1_ENABLE_DATA_SSV | \
					SEC_USER1_WB_CONTEXT_SSV | \
					SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_INTERFACE_USER_CTRL0_REG_V3	0x302220
#define SEC_INTERFACE_USER_CTRL1_REG_V3	0x302224
#define SEC_USER1_SMMU_NORMAL_V3	(BIT(23) | BIT(17) | BIT(11) | BIT(5))
#define SEC_USER1_SMMU_MASK_V3		0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_PREFETCH_CFG		0x301130
#define SEC_SVA_TRANS			0x301EC4
#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE		BIT(1)
#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		16
#define SEC_SQE_MASK_LEN		108
#define SEC_SHAPER_TYPE_RATE		400

#define SEC_DFX_BASE			0x301000
#define SEC_DFX_CORE			0x302100
#define SEC_DFX_COMMON1			0x301600
#define SEC_DFX_COMMON2			0x301C00
#define SEC_DFX_BASE_LEN		0x9D
#define SEC_DFX_CORE_LEN		0x32B
#define SEC_DFX_COMMON1_LEN		0x45
#define SEC_DFX_COMMON2_LEN		0xBA

#define SEC_ALG_BITMAP_SHIFT		32

#define SEC_CIPHER_BITMAP		(GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \
					GENMASK(24, 21))
#define SEC_DIGEST_BITMAP		(GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \
					GENMASK_ULL(42, 25))
#define SEC_AEAD_BITMAP			(GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
					GENMASK_ULL(45, 43))

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";

static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
	.register_to_crypto = sec_register_to_crypto,
	.unregister_from_crypto = sec_unregister_from_crypto,
};

static const struct hisi_qm_cap_info sec_basic_info[] = {
	{SEC_QM_NFE_MASK_CAP,           0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77},
	{SEC_QM_RESET_MASK_CAP,         0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77},
	{SEC_QM_OOO_SHUTDOWN_MASK_CAP,  0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
	{SEC_QM_CE_MASK_CAP,            0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{SEC_NFE_MASK_CAP,              0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177},
	{SEC_RESET_MASK_CAP,            0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
	{SEC_OOO_SHUTDOWN_MASK_CAP,     0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
	{SEC_CE_MASK_CAP,               0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
	{SEC_CLUSTER_NUM_CAP,           0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{SEC_CORE_TYPE_NUM_CAP,         0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{SEC_CORE_NUM_CAP,              0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
	{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
	{SEC_CORE_ENABLE_BITMAP,        0x3140, 0, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
	{SEC_DRV_ALG_BITMAP_LOW,        0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
	{SEC_DRV_ALG_BITMAP_HIGH,       0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
	{SEC_DEV_ALG_BITMAP_LOW,        0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_DEV_ALG_BITMAP_HIGH,       0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE1_ALG_BITMAP_LOW,      0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE1_ALG_BITMAP_HIGH,     0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE2_ALG_BITMAP_LOW,      0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE2_ALG_BITMAP_HIGH,     0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE3_ALG_BITMAP_LOW,      0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE3_ALG_BITMAP_HIGH,     0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE4_ALG_BITMAP_LOW,      0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE4_ALG_BITMAP_HIGH,     0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};

static const u32 sec_pre_store_caps[] = {
	SEC_DRV_ALG_BITMAP_LOW,
	SEC_DRV_ALG_BITMAP_HIGH,
	SEC_DEV_ALG_BITMAP_LOW,
	SEC_DEV_ALG_BITMAP_HIGH,
};

static const struct qm_dev_alg sec_dev_algs[] = { {
		.alg_msk = SEC_CIPHER_BITMAP,
		.alg = "cipher\n",
	}, {
		.alg_msk = SEC_DIGEST_BITMAP,
		.alg = "digest\n",
	}, {
		.alg_msk = SEC_AEAD_BITMAP,
		.alg = "aead\n",
	},
};

static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
	{"SEC_RAS_CE_ENABLE ", 0x301050},
	{"SEC_RAS_FE_ENABLE ", 0x301054},
	{"SEC_RAS_NFE_ENABLE ", 0x301058},
	{"SEC_REQ_TRNG_TIME_TH ", 0x30112C},
	{"SEC_CHANNEL_RNG_REQ_THLD ", 0x302110},
};

/* define the SEC's dfx regs region and region length */
static struct dfx_diff_registers sec_diff_regs[] = {
	{
		.reg_offset = SEC_DFX_BASE,
		.reg_len = SEC_DFX_BASE_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON1,
		.reg_len = SEC_DFX_COMMON1_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON2,
		.reg_len = SEC_DFX_COMMON2_LEN,
	}, {
		.reg_offset = SEC_DFX_CORE,
		.reg_len = SEC_DFX_CORE_LEN,
	},
};

static int sec_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(sec_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);

static bool pf_q_num_flag;
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(raw_smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
{
	u32 cap_val_h, cap_val_l;

	cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
	cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;

	return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means sec only register to crypto,
 * uacce_mode = 1 means sec both register to crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static void sec_set_endian(struct hisi_qm *qm)
{
	u32 reg;

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= ~(BIT(1) | BIT(0));
	if (!IS_ENABLED(CONFIG_64BIT))
		reg |= BIT(1);

	if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		reg |= BIT(0);

	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
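
/*
 * Program the engine's SMMU user-control bits. HW v3 always uses the
 * "normal" SMMU setting, while earlier hardware selects between the SVA
 * and normal bit patterns depending on whether SVA is in use.
 */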
static void sec_engine_sva_config(struct hisi_qm *qm)
{
	u32 reg;

	if (qm->ver > QM_HW_V2) {
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG_V3);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG_V3);

		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG_V3);
		reg &= SEC_USER1_SMMU_MASK_V3;
		reg |= SEC_USER1_SMMU_NORMAL_V3;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG_V3);
	} else {
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG);
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG);
		reg &= SEC_USER1_SMMU_MASK;
		if (qm->use_sva)
			reg |= SEC_USER1_SMMU_SVA;
		else
			reg |= SEC_USER1_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG);
	}
}

static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val &= SEC_PREFETCH_ENABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
					 val, !(val & SEC_PREFETCH_DISABLE),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void sec_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val |= SEC_PREFETCH_DISABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
					 val, !(val & SEC_SVA_DISABLE_READY),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}

static void sec_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);

	val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
	val |= SEC_DYNAMIC_GATE_EN;
	writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);

	val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
	val |= SEC_CORE_AUTO_GATE_EN;
	writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
}

static void sec_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* Kunpeng920 needs to close clock gating */
	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
}

static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control before mem init */
	sec_disable_clock_gate(qm);

	writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "fail to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	sec_engine_sva_config(qm);

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
	writel(reg, qm->io_base + SEC_SAA_EN_REG);

	if (qm->ver < QM_HW_V3) {
		/* HW V2 enable sm4 extra mode, as ctr/ecb */
		writel_relaxed(SEC_BD_ERR_CHK_EN0,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);

		/* HW V2 enable sm4 xts mode multiple iv */
		writel_relaxed(SEC_BD_ERR_CHK_EN1,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
		writel_relaxed(SEC_BD_ERR_CHK_EN3,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
	}

	/* config endian */
	sec_set_endian(qm);

	sec_enable_clock_gate(qm);

	return 0;
}

static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;

	/* clear sec dfx regs */
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}
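
/*
 * Toggle the AXI master out-of-order (OOO) shutdown path: when enabling,
 * set the shutdown enable bit and the OOO shutdown mask; when disabling,
 * clear both.
 */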
static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + SEC_CONTROL_REG);
	if (enable) {
		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
		val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
					   SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	} else {
		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	u32 ce, nfe;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 not support hw error handle\n");
		return;
	}

	ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);

	/* clear SEC hw error source if having */
	writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable RAS int */
	writel(ce, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(nfe, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, true);

	/* enable SEC hw error interrupts */
	writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, false);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
			SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_regs);

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
		debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);

	if (qm->fun_type == QM_HW_PF && sec_regs)
		debugfs_create_file("diff_regs", 0444, tmp_d,
				    qm, &sec_diff_regs_fops);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
					sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init SEC diff regs!\n");
		return ret;
	}

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;

	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto debugfs_remove;

	return 0;

debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);

	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
}
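
/*
 * Snapshot the DFX registers at PF probe time so that
 * sec_show_last_dfx_regs() can report which of them changed across a
 * controller reset.
 */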
static int sec_show_last_regs_init(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	int i;

	debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs),
				    sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     sec_dfx_regs[i].offset);

	return 0;
}

static void sec_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void sec_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dumps last word of the debugging registers during controller reset */
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
		if (val != debug->last_words[i])
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 sec_dfx_regs[i].name, debug->last_words[i], val);
	}
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					((err_val) >> SEC_ECC_NUM) &
					SEC_ECC_MASH);
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	u32 nfe_mask;

	nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
	writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = sec_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		sec_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			sec_disable_error_report(qm, err_status);
			return ACC_ERR_NEED_RESET;
		}
		sec_clear_hw_err_status(qm, err_status);
	}

	return ACC_ERR_RECOVERED;
}

static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->fe = SEC_RAS_FE_ENB_MSK;
	err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
	err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
			SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
			SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
			SEC_QM_RESET_MASK_CAP, qm->cap_ver);
	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
			SEC_RESET_MASK_CAP, qm->cap_ver);
	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init = sec_set_user_domain_and_cache,
	.hw_err_enable = sec_hw_error_enable,
	.hw_err_disable = sec_hw_error_disable,
	.get_dev_hw_err_status = sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.open_axi_master_ooo = sec_open_axi_master_ooo,
	.open_sva_prefetch = sec_open_sva_prefetch,
	.close_sva_prefetch = sec_close_sva_prefetch,
	.show_last_dfx_regs = sec_show_last_dfx_regs,
	.err_info_init = sec_err_info_init,
	.get_err_result = sec_get_err_result,
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	sec_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);
	ret = sec_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}
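
/*
 * Read the capability registers listed in sec_pre_store_caps once and
 * cache the values in qm->cap_tables for later lookups.
 */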
static int sec_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *sec_cap;
	struct pci_dev *pdev = qm->pdev;
	size_t i, size;

	size = ARRAY_SIZE(sec_pre_store_caps);
	sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
	if (!sec_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		sec_cap[i].type = sec_pre_store_caps[i];
		sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
				     sec_pre_store_caps[i], qm->cap_ver);
	}

	qm->cap_tables.dev_cap_table = sec_cap;

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
		qm->err_ini = &sec_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * have no way to get qm configure in VM in v1 hardware,
		 * so currently force PF to use SEC_PF_DEF_Q_NUM, and force
		 * to trigger only one VF in v1 hardware.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to init sec qm configures!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = sec_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
	ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
	if (ret) {
		pci_err(qm->pdev, "Failed to set sec algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	u32 type_rate = SEC_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	sec_debug_regs_clear(qm);
	sec_show_last_regs_uninit(qm);
	sec_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	hisi_qm_add_list(qm, &sec_devices);
	ret = hisi_qm_alg_register(qm, &sec_devices, ctx_q_num);
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_alg_unregister:
	hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num);
err_qm_del_list:
	hisi_qm_del_list(qm, &sec_devices);
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &sec_devices);
	hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num);
	hisi_qm_del_list(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct dev_pm_ops sec_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
			hisi_qm_sriov_configure : NULL,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &sec_pm_ops,
};

struct pci_driver *hisi_sec_get_pf_driver(void)
{
	return &sec_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver);

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");