debugfs.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2022 HiSilicon Limited. */
  3. #include <linux/hisi_acc_qm.h>
  4. #include "qm_common.h"
  5. #define QM_DFX_BASE 0x0100000
  6. #define QM_DFX_STATE1 0x0104000
  7. #define QM_DFX_STATE2 0x01040C8
  8. #define QM_DFX_COMMON 0x0000
  9. #define QM_DFX_BASE_LEN 0x5A
  10. #define QM_DFX_STATE1_LEN 0x2E
  11. #define QM_DFX_STATE2_LEN 0x11
  12. #define QM_DFX_COMMON_LEN 0xC3
  13. #define QM_DFX_REGS_LEN 4UL
  14. #define QM_DBG_TMP_BUF_LEN 22
  15. #define QM_XQC_ADDR_MASK GENMASK(31, 0)
  16. #define CURRENT_FUN_MASK GENMASK(5, 0)
  17. #define CURRENT_Q_MASK GENMASK(31, 16)
  18. #define QM_SQE_ADDR_MASK GENMASK(7, 0)
  19. #define QM_DFX_MB_CNT_VF 0x104010
  20. #define QM_DFX_DB_CNT_VF 0x104020
  21. #define QM_DFX_SQE_CNT_VF_SQN 0x104030
  22. #define QM_DFX_CQE_CNT_VF_CQN 0x104040
  23. #define QM_DFX_QN_SHIFT 16
  24. #define QM_DFX_CNT_CLR_CE 0x100118
  25. #define QM_DBG_WRITE_LEN 1024
  26. #define QM_IN_IDLE_ST_REG 0x1040e4
  27. #define QM_IN_IDLE_STATE 0x1
/* debugfs file names, indexed by enum qm_debug_file. */
static const char * const qm_debug_file_name[] = {
	[CURRENT_QM]   = "current_qm",
	[CURRENT_Q]    = "current_q",
	[CLEAR_ENABLE] = "clear_enable",
};
/* Human-readable QM states reported via the "status" file (see qm_status_read()). */
static const char * const qm_s[] = {
	"work", "stop",
};
/* One exported DFX counter: file name plus byte offset into struct qm_dfx. */
struct qm_dfx_item {
	const char *name;
	u32 offset;
};
/*
 * Dispatch-table entry for the "cmd" debugfs file: the command token the
 * user writes, the label passed to the dump routine, and the handler.
 */
struct qm_cmd_dump_item {
	const char *cmd;
	char *info_name;
	int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name);
};
/* Software event counters exposed as individual debugfs files. */
static struct qm_dfx_item qm_dfx_files[] = {
	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};
/* Number of leading entries in qm_dfx_regs[] that are read-clear counters. */
#define CNT_CYC_REGS_NUM		10
/* PF DFX register dump list: name plus offset from qm->io_base. */
static const struct debugfs_reg32 qm_dfx_regs[] = {
	/* XXX_CNT are reading clear register */
	{"QM_ECC_1BIT_CNT               ", 0x104000},
	{"QM_ECC_MBIT_CNT               ", 0x104008},
	{"QM_DFX_MB_CNT                 ", 0x104018},
	{"QM_DFX_DB_CNT                 ", 0x104028},
	{"QM_DFX_SQE_CNT                ", 0x104038},
	{"QM_DFX_CQE_CNT                ", 0x104048},
	{"QM_DFX_SEND_SQE_TO_ACC_CNT    ", 0x104050},
	{"QM_DFX_WB_SQE_FROM_ACC_CNT    ", 0x104058},
	{"QM_DFX_ACC_FINISH_CNT         ", 0x104060},
	{"QM_DFX_CQE_ERR_CNT            ", 0x1040b4},
	{"QM_DFX_FUNS_ACTIVE_ST         ", 0x200},
	{"QM_ECC_1BIT_INF               ", 0x104004},
	{"QM_ECC_MBIT_INF               ", 0x10400c},
	{"QM_DFX_ACC_RDY_VLD0           ", 0x1040a0},
	{"QM_DFX_ACC_RDY_VLD1           ", 0x1040a4},
	{"QM_DFX_AXI_RDY_VLD            ", 0x1040a8},
	{"QM_DFX_FF_ST0                 ", 0x1040c8},
	{"QM_DFX_FF_ST1                 ", 0x1040cc},
	{"QM_DFX_FF_ST2                 ", 0x1040d0},
	{"QM_DFX_FF_ST3                 ", 0x1040d4},
	{"QM_DFX_FF_ST4                 ", 0x1040d8},
	{"QM_DFX_FF_ST5                 ", 0x1040dc},
	{"QM_DFX_FF_ST6                 ", 0x1040e0},
	{"QM_IN_IDLE_ST                 ", 0x1040e4},
	{"QM_CACHE_CTL                  ", 0x100050},
	{"QM_TIMEOUT_CFG                ", 0x100070},
	{"QM_DB_TIMEOUT_CFG             ", 0x100074},
	{"QM_FLR_PENDING_TIME_CFG       ", 0x100078},
	{"QM_ARUSR_MCFG1                ", 0x100088},
	{"QM_AWUSR_MCFG1                ", 0x100098},
	{"QM_AXI_M_CFG_ENABLE           ", 0x1000B0},
	{"QM_RAS_CE_THRESHOLD           ", 0x1000F8},
	{"QM_AXI_TIMEOUT_CTRL           ", 0x100120},
	{"QM_AXI_TIMEOUT_STATUS         ", 0x100124},
	{"QM_CQE_AGGR_TIMEOUT_CTRL      ", 0x100144},
	{"ACC_RAS_MSI_INT_SEL           ", 0x1040fc},
	{"QM_CQE_OUT                    ", 0x104100},
	{"QM_EQE_OUT                    ", 0x104104},
	{"QM_AEQE_OUT                   ", 0x104108},
	{"QM_DB_INFO0                   ", 0x104180},
	{"QM_DB_INFO1                   ", 0x104184},
	{"QM_AM_CTRL_GLOBAL             ", 0x300000},
	{"QM_AM_CURR_PORT_STS           ", 0x300100},
	{"QM_AM_CURR_TRANS_RETURN       ", 0x300150},
	{"QM_AM_CURR_RD_MAX_TXID        ", 0x300154},
	{"QM_AM_CURR_WR_MAX_TXID        ", 0x300158},
	{"QM_AM_ALARM_RRESP             ", 0x300180},
	{"QM_AM_ALARM_BRESP             ", 0x300184},
};
/* VF register dump list - VFs can only see their own active-state register. */
static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
	{"QM_DFX_FUNS_ACTIVE_ST         ", 0x200},
};
/* define the QM's dfx regs region and region length */
static struct dfx_diff_registers qm_diff_regs[] = {
	{
		.reg_offset = QM_DFX_BASE,
		.reg_len = QM_DFX_BASE_LEN,
	}, {
		.reg_offset = QM_DFX_STATE1,
		.reg_len = QM_DFX_STATE1_LEN,
	}, {
		.reg_offset = QM_DFX_STATE2,
		.reg_len = QM_DFX_STATE2_LEN,
	}, {
		.reg_offset = QM_DFX_COMMON,
		.reg_len = QM_DFX_COMMON_LEN,
	},
};
  123. static struct hisi_qm *file_to_qm(struct debugfs_file *file)
  124. {
  125. struct qm_debug *debug = file->debug;
  126. return container_of(debug, struct hisi_qm, debug);
  127. }
  128. static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
  129. size_t count, loff_t *pos)
  130. {
  131. char buf[QM_DBG_READ_LEN];
  132. int len;
  133. len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
  134. "Please echo help to cmd to get help information");
  135. return simple_read_from_buffer(buffer, count, pos, buf, len);
  136. }
  137. static void dump_show(struct hisi_qm *qm, void *info,
  138. unsigned int info_size, char *info_name)
  139. {
  140. struct device *dev = &qm->pdev->dev;
  141. u8 *info_curr = info;
  142. u32 i;
  143. #define BYTE_PER_DW 4
  144. dev_info(dev, "%s DUMP\n", info_name);
  145. for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
  146. pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
  147. *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
  148. }
  149. }
/*
 * Dump the SQ context of one queue pair ("sqc <qp_id>").
 *
 * First tries to read the context back from the device via mailbox; if that
 * fails, falls back to the software copy in qm->sqc (under qps_lock).  The
 * DMA base address words are overwritten with QM_XQC_ADDR_MASK before
 * printing so kernel addresses do not leak into the log.
 */
static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_sqc sqc;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	/* The remaining command text is the qp id. */
	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
		return -EINVAL;
	}

	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
	if (!ret) {
		/* Mask the DMA base before dumping the hardware context. */
		sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
		sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
		dump_show(qm, &sqc, sizeof(struct qm_sqc), name);

		return 0;
	}

	/* Mailbox failed: dump the driver's soft copy instead, if present. */
	down_read(&qm->qps_lock);
	if (qm->sqc) {
		memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc));
		sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
		sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
		dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
	}
	up_read(&qm->qps_lock);

	return 0;
}
/*
 * Dump the CQ context of one queue pair ("cqc <qp_id>").
 * Mirror image of qm_sqc_dump(): hardware context via mailbox first, then
 * the software copy in qm->cqc as fallback; DMA base words are masked.
 */
static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_cqc cqc;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
		return -EINVAL;
	}

	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
	if (!ret) {
		/* Mask the DMA base before dumping the hardware context. */
		cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
		cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
		dump_show(qm, &cqc, sizeof(struct qm_cqc), name);

		return 0;
	}

	/* Mailbox failed: dump the driver's soft copy instead, if present. */
	down_read(&qm->qps_lock);
	if (qm->cqc) {
		memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));
		cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
		cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
		dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
	}
	up_read(&qm->qps_lock);

	return 0;
}
/*
 * Dump the EQ or AEQ context ("eqc" / "aeqc"); @name selects which.
 * Rejects any trailing arguments.  Only the struct picked via @name is
 * filled by the mailbox read; the mask stores to the other local struct
 * are dead writes kept to keep both paths symmetric.
 */
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_aeqc aeqc;
	struct qm_eqc eqc;
	size_t size;
	void *xeqc;
	int ret;
	u8 cmd;

	/* These commands take no parameters. */
	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	if (!strcmp(name, "EQC")) {
		cmd = QM_MB_CMD_EQC;
		size = sizeof(struct qm_eqc);
		xeqc = &eqc;
	} else {
		cmd = QM_MB_CMD_AEQC;
		size = sizeof(struct qm_aeqc);
		xeqc = &aeqc;
	}

	ret = qm_set_and_get_xqc(qm, cmd, xeqc, 0, 1);
	if (ret)
		return ret;

	/* Mask the DMA base words so addresses are not leaked to the log. */
	aeqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
	aeqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
	eqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
	eqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
	dump_show(qm, xeqc, size, name);

	return ret;
}
  242. static int q_dump_param_parse(struct hisi_qm *qm, char *s,
  243. u32 *e_id, u32 *q_id, u16 q_depth)
  244. {
  245. struct device *dev = &qm->pdev->dev;
  246. unsigned int qp_num = qm->qp_num;
  247. char *presult;
  248. int ret;
  249. presult = strsep(&s, " ");
  250. if (!presult) {
  251. dev_err(dev, "Please input qp number!\n");
  252. return -EINVAL;
  253. }
  254. ret = kstrtou32(presult, 0, q_id);
  255. if (ret || *q_id >= qp_num) {
  256. dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
  257. return -EINVAL;
  258. }
  259. presult = strsep(&s, " ");
  260. if (!presult) {
  261. dev_err(dev, "Please input sqe number!\n");
  262. return -EINVAL;
  263. }
  264. ret = kstrtou32(presult, 0, e_id);
  265. if (ret || *e_id >= q_depth) {
  266. dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
  267. return -EINVAL;
  268. }
  269. if (strsep(&s, " ")) {
  270. dev_err(dev, "Please do not input extra characters!\n");
  271. return -EINVAL;
  272. }
  273. return 0;
  274. }
  275. static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
  276. {
  277. u16 sq_depth = qm->qp_array->sq_depth;
  278. struct hisi_qp *qp;
  279. u32 qp_id, sqe_id;
  280. void *sqe;
  281. int ret;
  282. ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
  283. if (ret)
  284. return ret;
  285. sqe = kzalloc(qm->sqe_size, GFP_KERNEL);
  286. if (!sqe)
  287. return -ENOMEM;
  288. qp = &qm->qp_array[qp_id];
  289. memcpy(sqe, qp->sqe + sqe_id * qm->sqe_size, qm->sqe_size);
  290. memset(sqe + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
  291. qm->debug.sqe_mask_len);
  292. dump_show(qm, sqe, qm->sqe_size, name);
  293. kfree(sqe);
  294. return 0;
  295. }
  296. static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
  297. {
  298. struct qm_cqe *cqe_curr;
  299. struct hisi_qp *qp;
  300. u32 qp_id, cqe_id;
  301. int ret;
  302. ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
  303. if (ret)
  304. return ret;
  305. qp = &qm->qp_array[qp_id];
  306. cqe_curr = qp->cqe + cqe_id;
  307. dump_show(qm, cqe_curr, sizeof(struct qm_cqe), name);
  308. return 0;
  309. }
/*
 * Dump one EQ or AEQ entry ("eq <id>" / "aeq <id>"); @name selects which
 * ring and hence which depth limit applies.  The ring pointer is sampled
 * under qps_lock so it cannot be freed mid-dump.
 */
static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
{
	struct device *dev = &qm->pdev->dev;
	u16 xeq_depth;
	size_t size;
	void *xeqe;
	u32 xeqe_id;
	int ret;

	if (!s)
		return -EINVAL;

	/* On success ret is 0 here and stays 0 through the happy path. */
	ret = kstrtou32(s, 0, &xeqe_id);
	if (ret)
		return -EINVAL;

	if (!strcmp(name, "EQE")) {
		xeq_depth = qm->eq_depth;
		size = sizeof(struct qm_eqe);
	} else {
		xeq_depth = qm->aeq_depth;
		size = sizeof(struct qm_aeqe);
	}

	if (xeqe_id >= xeq_depth) {
		dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1);
		return -EINVAL;
	}

	down_read(&qm->qps_lock);

	if (qm->eqe && !strcmp(name, "EQE")) {
		xeqe = qm->eqe + xeqe_id;
	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
		xeqe = qm->aeqe + xeqe_id;
	} else {
		/* Ring not allocated (or unknown name): fail under the lock. */
		ret = -EINVAL;
		goto err_unlock;
	}

	dump_show(qm, xeqe, size, name);

err_unlock:
	up_read(&qm->qps_lock);
	return ret;
}
/* "help" command: print the list of supported cmd-file commands. */
static int qm_dbg_help(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;

	/* "help" takes no arguments. */
	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	dev_info(dev, "available commands:\n");
	dev_info(dev, "sqc <num>\n");
	dev_info(dev, "cqc <num>\n");
	dev_info(dev, "eqc\n");
	dev_info(dev, "aeqc\n");
	dev_info(dev, "sq <num> <e>\n");
	dev_info(dev, "cq <num> <e>\n");
	dev_info(dev, "eq <e>\n");
	dev_info(dev, "aeq <e>\n");

	return 0;
}
/* Command dispatch table for qm_cmd_write_dump(); matched by exact token. */
static const struct qm_cmd_dump_item qm_cmd_dump_table[] = {
	{
		.cmd = "sqc",
		.info_name = "SQC",
		.dump_fn = qm_sqc_dump,
	}, {
		.cmd = "cqc",
		.info_name = "CQC",
		.dump_fn = qm_cqc_dump,
	}, {
		.cmd = "eqc",
		.info_name = "EQC",
		.dump_fn = qm_eqc_aeqc_dump,
	}, {
		.cmd = "aeqc",
		.info_name = "AEQC",
		.dump_fn = qm_eqc_aeqc_dump,
	}, {
		.cmd = "sq",
		.info_name = "SQE",
		.dump_fn = qm_sq_dump,
	}, {
		.cmd = "cq",
		.info_name = "CQE",
		.dump_fn = qm_cq_dump,
	}, {
		.cmd = "eq",
		.info_name = "EQE",
		.dump_fn = qm_eq_aeq_dump,
	}, {
		.cmd = "aeq",
		.info_name = "AEQE",
		.dump_fn = qm_eq_aeq_dump,
	},
};
  401. static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
  402. {
  403. struct device *dev = &qm->pdev->dev;
  404. char *presult, *s, *s_tmp;
  405. int table_size, i, ret;
  406. s = kstrdup(cmd_buf, GFP_KERNEL);
  407. if (!s)
  408. return -ENOMEM;
  409. s_tmp = s;
  410. presult = strsep(&s, " ");
  411. if (!presult) {
  412. ret = -EINVAL;
  413. goto err_buffer_free;
  414. }
  415. if (!strcmp(presult, "help")) {
  416. ret = qm_dbg_help(qm, s);
  417. goto err_buffer_free;
  418. }
  419. table_size = ARRAY_SIZE(qm_cmd_dump_table);
  420. for (i = 0; i < table_size; i++) {
  421. if (!strcmp(presult, qm_cmd_dump_table[i].cmd)) {
  422. ret = qm_cmd_dump_table[i].dump_fn(qm, s,
  423. qm_cmd_dump_table[i].info_name);
  424. break;
  425. }
  426. }
  427. if (i == table_size) {
  428. dev_info(dev, "Please echo help\n");
  429. ret = -EINVAL;
  430. }
  431. err_buffer_free:
  432. kfree(s_tmp);
  433. return ret;
  434. }
  435. static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
  436. size_t count, loff_t *pos)
  437. {
  438. struct hisi_qm *qm = filp->private_data;
  439. char *cmd_buf, *cmd_buf_tmp;
  440. int ret;
  441. if (*pos)
  442. return 0;
  443. ret = hisi_qm_get_dfx_access(qm);
  444. if (ret)
  445. return ret;
  446. /* Judge if the instance is being reset. */
  447. if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
  448. ret = 0;
  449. goto put_dfx_access;
  450. }
  451. if (count > QM_DBG_WRITE_LEN) {
  452. ret = -ENOSPC;
  453. goto put_dfx_access;
  454. }
  455. cmd_buf = memdup_user_nul(buffer, count);
  456. if (IS_ERR(cmd_buf)) {
  457. ret = PTR_ERR(cmd_buf);
  458. goto put_dfx_access;
  459. }
  460. cmd_buf_tmp = strchr(cmd_buf, '\n');
  461. if (cmd_buf_tmp) {
  462. *cmd_buf_tmp = '\0';
  463. count = cmd_buf_tmp - cmd_buf + 1;
  464. }
  465. ret = qm_cmd_write_dump(qm, cmd_buf);
  466. if (ret) {
  467. kfree(cmd_buf);
  468. goto put_dfx_access;
  469. }
  470. kfree(cmd_buf);
  471. ret = count;
  472. put_dfx_access:
  473. hisi_qm_put_dfx_access(qm);
  474. return ret;
  475. }
/* File operations for the "cmd" debugfs file. */
static const struct file_operations qm_cmd_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_cmd_read,
	.write = qm_cmd_write,
};
  482. /**
  483. * hisi_qm_regs_dump() - Dump registers's value.
  484. * @s: debugfs file handle.
  485. * @regset: accelerator registers information.
  486. *
  487. * Dump accelerator registers.
  488. */
  489. void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
  490. {
  491. struct pci_dev *pdev = to_pci_dev(regset->dev);
  492. struct hisi_qm *qm = pci_get_drvdata(pdev);
  493. const struct debugfs_reg32 *regs = regset->regs;
  494. int regs_len = regset->nregs;
  495. int i, ret;
  496. u32 val;
  497. ret = hisi_qm_get_dfx_access(qm);
  498. if (ret)
  499. return;
  500. for (i = 0; i < regs_len; i++) {
  501. val = readl(regset->base + regs[i].offset);
  502. seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
  503. }
  504. hisi_qm_put_dfx_access(qm);
  505. }
  506. EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
  507. static int qm_regs_show(struct seq_file *s, void *unused)
  508. {
  509. struct hisi_qm *qm = s->private;
  510. struct debugfs_regset32 regset;
  511. if (qm->fun_type == QM_HW_PF) {
  512. regset.regs = qm_dfx_regs;
  513. regset.nregs = ARRAY_SIZE(qm_dfx_regs);
  514. } else {
  515. regset.regs = qm_vf_dfx_regs;
  516. regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
  517. }
  518. regset.base = qm->io_base;
  519. regset.dev = &qm->pdev->dev;
  520. hisi_qm_regs_dump(s, &regset);
  521. return 0;
  522. }
  523. DEFINE_SHOW_ATTRIBUTE(qm_regs);
/* Read the currently selected queue number from the upper half of the SQN counter register. */
static u32 current_q_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}
/*
 * Select which queue the SQE/CQE DFX counters track.  Read-modify-write of
 * both counter registers: the new queue number goes in the upper half while
 * the function-select bits (CURRENT_FUN_MASK) are preserved.
 */
static int current_q_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val >= qm->debug.curr_qm_qp_num)
		return -EINVAL;

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}
/* Read the read-clear enable bit for the DFX counters. */
static u32 clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}
/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
{
	/* Only 0 and 1 are meaningful values for this register. */
	if (rd_clr_ctrl > 1)
		return -EINVAL;

	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);

	return 0;
}
/* Read the currently selected function (0 = PF, 1..n = VF) from the MB counter register. */
static u32 current_qm_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
/*
 * Compute how many queue pairs VF @fun_num (1-based) owns.  The pool left
 * after the PF's queues is split evenly across the VFs; the remainder is
 * given either entirely to the last VF or one-per-VF to the trailing VFs,
 * depending on whether the lump sum would exceed max_qp_num.
 */
static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
{
	u32 remain_q_num, vfq_num;
	u32 num_vfs = qm->vfs_num;

	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;

	/* Each VF is capped at max_qp_num regardless of the pool size. */
	if (vfq_num >= qm->max_qp_num)
		return qm->max_qp_num;

	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
	if (vfq_num + remain_q_num <= qm->max_qp_num)
		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;

	/*
	 * if vfq_num + remain_q_num > max_qp_num, the last VFs,
	 * each with one more queue.
	 */
	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
}
/*
 * Select which function (0 = PF, 1..vfs_num = VF) the DFX counters track.
 * Also caches that function's qp count in curr_qm_qp_num, which bounds
 * later current_q_write() values.
 */
static int current_qm_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

	/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
	if (!val)
		qm->debug.curr_qm_qp_num = qm->qp_num;
	else
		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	/* Keep the queue-select bits, replace only the function bits. */
	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}
/*
 * Shared read handler for current_qm / current_q / clear_enable.  Takes
 * DFX access (may fail while suspended), reads the selected value under
 * the per-file lock, and returns it as decimal text.
 */
static ssize_t qm_debug_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[QM_DBG_TMP_BUF_LEN];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_QM:
		val = current_qm_read(qm);
		break;
	case CURRENT_Q:
		val = current_q_read(qm);
		break;
	case CLEAR_ENABLE:
		val = clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}
	mutex_unlock(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	/* Unknown index: release the lock and DFX access before failing. */
	mutex_unlock(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}
/*
 * Shared write handler for current_qm / current_q / clear_enable.  Parses
 * an unsigned integer from userspace and forwards it to the per-file
 * writer under the file lock.
 */
static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	struct hisi_qm *qm = file_to_qm(file);
	unsigned long val;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= QM_DBG_TMP_BUF_LEN)
		return -ENOSPC;

	/* Leave room for the terminating NUL appended below. */
	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
				     count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_QM:
		ret = current_qm_write(qm, val);
		break;
	case CURRENT_Q:
		ret = current_q_write(qm, val);
		break;
	case CLEAR_ENABLE:
		ret = clear_enable_write(qm, val);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->lock);

	hisi_qm_put_dfx_access(qm);

	if (ret)
		return ret;

	return count;
}
/* File operations shared by the current_qm/current_q/clear_enable files. */
static const struct file_operations qm_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_debug_read,
	.write = qm_debug_write,
};
  677. static void dfx_regs_uninit(struct hisi_qm *qm,
  678. struct dfx_diff_registers *dregs, int reg_len)
  679. {
  680. int i;
  681. if (!dregs)
  682. return;
  683. /* Setting the pointer is NULL to prevent double free */
  684. for (i = 0; i < reg_len; i++) {
  685. if (!dregs[i].regs)
  686. continue;
  687. kfree(dregs[i].regs);
  688. dregs[i].regs = NULL;
  689. }
  690. kfree(dregs);
  691. }
/*
 * Allocate a diff-registers array mirroring @cregs and snapshot the current
 * hardware values of every region into it.  Returns the array or
 * ERR_PTR(-ENOMEM); on failure all partial allocations are unwound.
 */
static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
	const struct dfx_diff_registers *cregs, u32 reg_len)
{
	struct dfx_diff_registers *diff_regs;
	u32 j, base_offset;
	int i;

	diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
	if (!diff_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < reg_len; i++) {
		/* Empty regions keep a NULL .regs and are skipped on dump. */
		if (!cregs[i].reg_len)
			continue;

		diff_regs[i].reg_offset = cregs[i].reg_offset;
		diff_regs[i].reg_len = cregs[i].reg_len;
		diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
					    GFP_KERNEL);
		if (!diff_regs[i].regs)
			goto alloc_error;

		/* Snapshot the region: registers are QM_DFX_REGS_LEN apart. */
		for (j = 0; j < diff_regs[i].reg_len; j++) {
			base_offset = diff_regs[i].reg_offset +
				      j * QM_DFX_REGS_LEN;
			diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
		}
	}

	return diff_regs;

alloc_error:
	/* Unwind only the entries allocated before the failure at index i. */
	while (i > 0) {
		i--;
		kfree(diff_regs[i].regs);
	}
	kfree(diff_regs);
	return ERR_PTR(-ENOMEM);
}
/*
 * Snapshot both the QM's own diff regions and the accelerator-specific
 * regions supplied by the driver.  On any failure both pointers are left
 * NULL so the uninit path is always safe.
 */
static int qm_diff_regs_init(struct hisi_qm *qm,
		struct dfx_diff_registers *dregs, u32 reg_len)
{
	int ret;

	qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
	if (IS_ERR(qm->debug.qm_diff_regs)) {
		ret = PTR_ERR(qm->debug.qm_diff_regs);
		qm->debug.qm_diff_regs = NULL;
		return ret;
	}

	qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
	if (IS_ERR(qm->debug.acc_diff_regs)) {
		/* Roll back the QM snapshot allocated above. */
		dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
		ret = PTR_ERR(qm->debug.acc_diff_regs);
		qm->debug.acc_diff_regs = NULL;
		return ret;
	}

	return 0;
}
  744. static void qm_last_regs_uninit(struct hisi_qm *qm)
  745. {
  746. struct qm_debug *debug = &qm->debug;
  747. if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
  748. return;
  749. kfree(debug->qm_last_words);
  750. debug->qm_last_words = NULL;
  751. }
  752. static int qm_last_regs_init(struct hisi_qm *qm)
  753. {
  754. int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
  755. struct qm_debug *debug = &qm->debug;
  756. int i;
  757. if (qm->fun_type == QM_HW_VF)
  758. return 0;
  759. debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
  760. if (!debug->qm_last_words)
  761. return -ENOMEM;
  762. for (i = 0; i < dfx_regs_num; i++) {
  763. debug->qm_last_words[i] = readl_relaxed(qm->io_base +
  764. qm_dfx_regs[i].offset);
  765. }
  766. return 0;
  767. }
/* Free both diff-registers snapshots and NULL the cached pointers. */
static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
{
	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
	qm->debug.acc_diff_regs = NULL;
	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
	qm->debug.qm_diff_regs = NULL;
}
/**
 * hisi_qm_regs_debugfs_init() - Allocate memory for registers.
 * @qm: device qm handle.
 * @dregs: diff registers handle.
 * @reg_len: diff registers region length.
 *
 * PF only; a no-op (returning 0) for VFs.  Allocates the last-words
 * snapshot and the diff-registers snapshots; on partial failure the
 * already-allocated parts are rolled back.
 */
int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
		struct dfx_diff_registers *dregs, u32 reg_len)
{
	int ret;

	if (!qm || !dregs)
		return -EINVAL;

	if (qm->fun_type != QM_HW_PF)
		return 0;

	ret = qm_last_regs_init(qm);
	if (ret) {
		dev_info(&qm->pdev->dev, "failed to init qm words memory!\n");
		return ret;
	}

	ret = qm_diff_regs_init(qm, dregs, reg_len);
	if (ret) {
		/* Roll back the last-words allocation. */
		qm_last_regs_uninit(qm);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_init);
/**
 * hisi_qm_regs_debugfs_uninit() - Free memory for registers.
 * @qm: device qm handle.
 * @reg_len: diff registers region length.
 *
 * Counterpart of hisi_qm_regs_debugfs_init(); safe to call for VFs
 * (no-op) and tolerant of a NULL @qm.
 */
void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len)
{
	if (!qm || qm->fun_type != QM_HW_PF)
		return;

	qm_diff_regs_uninit(qm, reg_len);
	qm_last_regs_uninit(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_uninit);
/**
 * hisi_qm_acc_diff_regs_dump() - Dump registers's value.
 * @qm: device qm handle.
 * @s: Debugfs file handle.
 * @dregs: diff registers handle.
 * @regs_len: diff registers region length.
 *
 * Prints only the registers whose current hardware value differs from the
 * snapshot taken at init time, as "offset = old ---> new" lines.  Reads
 * are done under qps_lock with DFX access held.
 */
void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
	struct dfx_diff_registers *dregs, u32 regs_len)
{
	u32 j, val, base_offset;
	int i, ret;

	if (!qm || !s || !dregs)
		return;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return;

	down_read(&qm->qps_lock);
	for (i = 0; i < regs_len; i++) {
		/* Regions with no length were never snapshotted. */
		if (!dregs[i].reg_len)
			continue;

		for (j = 0; j < dregs[i].reg_len; j++) {
			base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
			val = readl(qm->io_base + base_offset);
			if (val != dregs[i].regs[j])
				seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
					   base_offset, dregs[i].regs[j], val);
		}
	}
	up_read(&qm->qps_lock);

	hisi_qm_put_dfx_access(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
  848. void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
  849. {
  850. struct qm_debug *debug = &qm->debug;
  851. struct pci_dev *pdev = qm->pdev;
  852. u32 val;
  853. int i;
  854. if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
  855. return;
  856. for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
  857. val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
  858. if (debug->qm_last_words[i] != val)
  859. pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
  860. qm_dfx_regs[i].name, debug->qm_last_words[i], val);
  861. }
  862. }
  863. static int qm_diff_regs_show(struct seq_file *s, void *unused)
  864. {
  865. struct hisi_qm *qm = s->private;
  866. hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
  867. ARRAY_SIZE(qm_diff_regs));
  868. return 0;
  869. }
  870. DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
  871. static int qm_state_show(struct seq_file *s, void *unused)
  872. {
  873. struct hisi_qm *qm = s->private;
  874. u32 val;
  875. int ret;
  876. /* If device is in suspended, directly return the idle state. */
  877. ret = hisi_qm_get_dfx_access(qm);
  878. if (!ret) {
  879. val = readl(qm->io_base + QM_IN_IDLE_ST_REG);
  880. hisi_qm_put_dfx_access(qm);
  881. } else if (ret == -EAGAIN) {
  882. val = QM_IN_IDLE_STATE;
  883. } else {
  884. return ret;
  885. }
  886. seq_printf(s, "%u\n", val);
  887. return 0;
  888. }
  889. DEFINE_SHOW_ATTRIBUTE(qm_state);
  890. static ssize_t qm_status_read(struct file *filp, char __user *buffer,
  891. size_t count, loff_t *pos)
  892. {
  893. struct hisi_qm *qm = filp->private_data;
  894. char buf[QM_DBG_READ_LEN];
  895. int val, len;
  896. val = atomic_read(&qm->status.flags);
  897. len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
  898. return simple_read_from_buffer(buffer, count, pos, buf, len);
  899. }
/* Read-only debugfs "status" file: see qm_status_read(). */
static const struct file_operations qm_status_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_status_read,
};
  905. static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
  906. enum qm_debug_file index)
  907. {
  908. struct debugfs_file *file = qm->debug.files + index;
  909. file->index = index;
  910. mutex_init(&file->lock);
  911. file->debug = &qm->debug;
  912. debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
  913. &qm_debug_fops);
  914. }
  915. static int qm_debugfs_atomic64_set(void *data, u64 val)
  916. {
  917. if (val)
  918. return -EINVAL;
  919. atomic64_set((atomic64_t *)data, 0);
  920. return 0;
  921. }
  922. static int qm_debugfs_atomic64_get(void *data, u64 *val)
  923. {
  924. *val = atomic64_read((atomic64_t *)data);
  925. return 0;
  926. }
  927. DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
  928. qm_debugfs_atomic64_set, "%llu\n");
/**
 * hisi_qm_debug_init() - Initialize qm related debugfs files.
 * @qm: The qm for which we want to add debugfs files.
 *
 * Create qm related debugfs files. The file names and mode bits below are
 * userspace-visible ABI; do not rename or change permissions casually.
 */
void hisi_qm_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
	struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
	struct qm_dfx *dfx = &qm->debug.dfx;
	struct dentry *qm_d;
	void *data;
	int i;

	/* All QM files live in a "qm" subdirectory of the device's root. */
	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
	qm->debug.qm_d = qm_d;

	/* only show this in PF */
	if (qm->fun_type == QM_HW_PF) {
		debugfs_create_file("qm_state", 0444, qm->debug.qm_d,
					qm, &qm_state_fops);
		/* current_qm sits at the device root, not under qm/. */
		qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
			qm_create_debugfs_file(qm, qm->debug.qm_d, i);
	}

	/* diff_regs is only available when a diff-regs snapshot exists. */
	if (qm_regs)
		debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
					qm, &qm_diff_regs_fops);

	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);

	debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);

	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
			&qm_status_fops);

	debugfs_create_u32("dev_state", 0444, qm->debug.qm_d, &dev_dfx->dev_state);
	debugfs_create_u32("dev_timeout", 0644, qm->debug.qm_d, &dev_dfx->dev_timeout);

	/* One atomic64 counter file per entry in the qm_dfx_files table. */
	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
		debugfs_create_file(qm_dfx_files[i].name,
			0644,
			qm_d,
			data,
			&qm_atomic64_ops);
	}

	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		hisi_qm_set_algqos_init(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
  974. /**
  975. * hisi_qm_debug_regs_clear() - clear qm debug related registers.
  976. * @qm: The qm for which we want to clear its debug registers.
  977. */
  978. void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
  979. {
  980. const struct debugfs_reg32 *regs;
  981. int i;
  982. /* clear current_qm */
  983. writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
  984. writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
  985. /* clear current_q */
  986. writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
  987. writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
  988. /*
  989. * these registers are reading and clearing, so clear them after
  990. * reading them.
  991. */
  992. writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
  993. regs = qm_dfx_regs;
  994. for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
  995. readl(qm->io_base + regs->offset);
  996. regs++;
  997. }
  998. /* clear clear_enable */
  999. writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
  1000. }
  1001. EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);