armada_xp_edac.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Pengutronix, Jan Luebbe <kernel@pengutronix.de>
 */

#include <linux/kernel.h>
#include <linux/edac.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>

#include "edac_mc.h"
#include "edac_device.h"
#include "edac_module.h"

/************************ EDAC MC (DDR RAM) ********************************/

#define SDRAM_NUM_CS 4

#define SDRAM_CONFIG_REG 0x0
#define SDRAM_CONFIG_ECC_MASK BIT(18)
#define SDRAM_CONFIG_REGISTERED_MASK BIT(17)
#define SDRAM_CONFIG_BUS_WIDTH_MASK BIT(15)

#define SDRAM_ADDR_CTRL_REG 0x10
#define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs)
#define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs) (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs))
#define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs) BIT(16+cs)
#define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs) (cs*4+2)
#define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs))
#define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs) (cs*4)
#define SDRAM_ADDR_CTRL_STRUCT_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs))

#define SDRAM_ERR_DATA_H_REG 0x40
#define SDRAM_ERR_DATA_L_REG 0x44

#define SDRAM_ERR_RECV_ECC_REG 0x48
#define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_CALC_ECC_REG 0x4c
#define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8
#define SDRAM_ERR_CALC_ECC_ROW_MASK (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET)
#define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_ADDR_REG 0x50
#define SDRAM_ERR_ADDR_BANK_OFFSET 23
#define SDRAM_ERR_ADDR_BANK_MASK (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET)
#define SDRAM_ERR_ADDR_COL_OFFSET 8
#define SDRAM_ERR_ADDR_COL_MASK (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET)
#define SDRAM_ERR_ADDR_CS_OFFSET 1
#define SDRAM_ERR_ADDR_CS_MASK (0x3 << SDRAM_ERR_ADDR_CS_OFFSET)
#define SDRAM_ERR_ADDR_TYPE_MASK BIT(0)

#define SDRAM_ERR_CTRL_REG 0x54
#define SDRAM_ERR_CTRL_THR_OFFSET 16
#define SDRAM_ERR_CTRL_THR_MASK (0xff << SDRAM_ERR_CTRL_THR_OFFSET)
#define SDRAM_ERR_CTRL_PROP_MASK BIT(9)

#define SDRAM_ERR_SBE_COUNT_REG 0x58
#define SDRAM_ERR_DBE_COUNT_REG 0x5c

#define SDRAM_ERR_CAUSE_ERR_REG 0xd0
#define SDRAM_ERR_CAUSE_MSG_REG 0xd8
#define SDRAM_ERR_CAUSE_DBE_MASK BIT(1)
#define SDRAM_ERR_CAUSE_SBE_MASK BIT(0)

#define SDRAM_RANK_CTRL_REG 0x1e0
#define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs)

struct axp_mc_drvdata {
        void __iomem *base;
        /* width in bytes */
        unsigned int width;
        /* bank interleaving */
        bool cs_addr_sel[SDRAM_NUM_CS];

        char msg[128];
};

/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
                                    uint8_t cs, uint8_t bank, uint16_t row,
                                    uint16_t col)
{
        if (drvdata->width == 8) {
                /* 64 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xfff8) << 16) |
                                ((bank & 0x7) << 16) |
                                ((row & 0x7) << 13) |
                                ((col & 0x3ff) << 3));
                else
                        return (((row & 0xffff) << 16) |
                                ((bank & 0x7) << 13) |
                                ((col & 0x3ff) << 3));
        } else if (drvdata->width == 4) {
                /* 32 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xfff0) << 15) |
                                ((bank & 0x7) << 16) |
                                ((row & 0xf) << 12) |
                                ((col & 0x3ff) << 2));
                else
                        return (((row & 0xffff) << 15) |
                                ((bank & 0x7) << 12) |
                                ((col & 0x3ff) << 2));
        } else {
                /* 16 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xffe0) << 14) |
                                ((bank & 0x7) << 16) |
                                ((row & 0x1f) << 11) |
                                ((col & 0x3ff) << 1));
                else
                        return (((row & 0xffff) << 14) |
                                ((bank & 0x7) << 11) |
                                ((col & 0x3ff) << 1));
        }
}
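
/*
 * Worked example (illustrative only, not taken from the datasheet): on a
 * 64-bit bus with bank interleaving disabled, row 0x0012, bank 3, col 0x01f
 * map to ((0x0012 << 16) | (3 << 13) | (0x01f << 3)) = 0x001260f8.
 */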

static void axp_mc_check(struct mem_ctl_info *mci)
{
        struct axp_mc_drvdata *drvdata = mci->pvt_info;
        uint32_t data_h, data_l, recv_ecc, calc_ecc, addr;
        uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg;
        uint32_t row_val, col_val, bank_val, addr_val;
        uint8_t syndrome_val, cs_val;
        char *msg = drvdata->msg;

        data_h = readl(drvdata->base + SDRAM_ERR_DATA_H_REG);
        data_l = readl(drvdata->base + SDRAM_ERR_DATA_L_REG);
        recv_ecc = readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG);
        calc_ecc = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG);
        addr = readl(drvdata->base + SDRAM_ERR_ADDR_REG);
        cnt_sbe = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        cnt_dbe = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
        cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear cause registers */
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
               drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
               drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear error counter registers */
        if (cnt_sbe)
                writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        if (cnt_dbe)
                writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

        if (!cnt_sbe && !cnt_dbe)
                return;

        if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
                if (cnt_sbe)
                        cnt_sbe--;
                else
                        dev_warn(mci->pdev, "inconsistent SBE count detected\n");
        } else {
                if (cnt_dbe)
                        cnt_dbe--;
                else
                        dev_warn(mci->pdev, "inconsistent DBE count detected\n");
        }
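
        /*
         * The capture registers describe only the most recent error, which
         * is reported with full details further below; it is subtracted from
         * its counter here so that the remaining count can be reported as an
         * aggregate without details.
         */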

        /* report earlier errors */
        if (cnt_sbe)
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                                     cnt_sbe, /* error count */
                                     0, 0, 0, /* pfn, offset, syndrome */
                                     -1, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name,
                                     "details unavailable (multiple errors)");
        if (cnt_dbe)
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
                                     cnt_dbe, /* error count */
                                     0, 0, 0, /* pfn, offset, syndrome */
                                     -1, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name,
                                     "details unavailable (multiple errors)");

        /* report details for most recent error */
        cs_val = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET;
        bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET;
        row_val = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET;
        col_val = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET;
        syndrome_val = (recv_ecc ^ calc_ecc) & 0xff;
        addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val,
                                       col_val);
        msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */
        msg += sprintf(msg, "bank=0x%x ", bank_val); /*  9 chars */
        msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */
        msg += sprintf(msg, "cs=%d", cs_val);        /*  4 chars */

        if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                                     1, /* error count */
                                     addr_val >> PAGE_SHIFT,
                                     addr_val & ~PAGE_MASK,
                                     syndrome_val,
                                     cs_val, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name, drvdata->msg);
        } else {
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
                                     1, /* error count */
                                     addr_val >> PAGE_SHIFT,
                                     addr_val & ~PAGE_MASK,
                                     syndrome_val,
                                     cs_val, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name, drvdata->msg);
        }
}

static void axp_mc_read_config(struct mem_ctl_info *mci)
{
        struct axp_mc_drvdata *drvdata = mci->pvt_info;
        uint32_t config, addr_ctrl, rank_ctrl;
        unsigned int i, cs_struct, cs_size;
        struct dimm_info *dimm;

        config = readl(drvdata->base + SDRAM_CONFIG_REG);
        if (config & SDRAM_CONFIG_BUS_WIDTH_MASK)
                /* 64 bit */
                drvdata->width = 8;
        else
                /* 32 bit */
                drvdata->width = 4;

        addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG);
        rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG);

        for (i = 0; i < SDRAM_NUM_CS; i++) {
                dimm = mci->dimms[i];

                if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i)))
                        continue;

                drvdata->cs_addr_sel[i] =
                        !!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i));

                cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i);
                cs_size = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) |
                           ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i)));
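                /*
                 * Note: the 3-bit size code is split in the register; bit 2
                 * comes from SIZE_HIGH (bit 20+cs) and bits 1:0 from SIZE_LOW
                 * (bits cs*4+3:cs*4+2), so the shifts above reassemble it
                 * before it is decoded below.
                 */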
                switch (cs_size) {
                case 0: /* 2GBit */
                        dimm->nr_pages = 524288;
                        break;
                case 1: /* 256MBit */
                        dimm->nr_pages = 65536;
                        break;
                case 2: /* 512MBit */
                        dimm->nr_pages = 131072;
                        break;
                case 3: /* 1GBit */
                        dimm->nr_pages = 262144;
                        break;
                case 4: /* 4GBit */
                        dimm->nr_pages = 1048576;
                        break;
                case 5: /* 8GBit */
                        dimm->nr_pages = 2097152;
                        break;
                }

                dimm->grain = 8;
                dimm->dtype = cs_struct ? DEV_X16 : DEV_X8;
                dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ?
                        MEM_RDDR3 : MEM_DDR3;
                dimm->edac_mode = EDAC_SECDED;
        }
}

static const struct of_device_id axp_mc_of_match[] = {
        {.compatible = "marvell,armada-xp-sdram-controller",},
        {},
};
MODULE_DEVICE_TABLE(of, axp_mc_of_match);

static int axp_mc_probe(struct platform_device *pdev)
{
        struct axp_mc_drvdata *drvdata;
        struct edac_mc_layer layers[1];
        const struct of_device_id *id;
        struct mem_ctl_info *mci;
        void __iomem *base;
        uint32_t config;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base)) {
                dev_err(&pdev->dev, "Unable to map regs\n");
                return PTR_ERR(base);
        }

        config = readl(base + SDRAM_CONFIG_REG);
        if (!(config & SDRAM_CONFIG_ECC_MASK)) {
                dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n");
                return -EINVAL;
        }

        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = SDRAM_NUM_CS;
        layers[0].is_virt_csrow = true;
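
        /*
         * A single chip-select layer: each of the four SDRAM chip selects is
         * presented to EDAC as its own (virtual) csrow/dimm, filled in by
         * axp_mc_read_config().
         */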

        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata));
        if (!mci)
                return -ENOMEM;

        drvdata = mci->pvt_info;
        drvdata->base = base;
        mci->pdev = &pdev->dev;
        platform_set_drvdata(pdev, mci);

        id = of_match_device(axp_mc_of_match, &pdev->dev);
        mci->edac_check = axp_mc_check;
        mci->mtype_cap = MEM_FLAG_DDR3;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = pdev->dev.driver->name;
        mci->ctl_name = id ? id->compatible : "unknown";
        mci->dev_name = dev_name(&pdev->dev);
        mci->scrub_mode = SCRUB_NONE;

        axp_mc_read_config(mci);

        /* These SoCs have a reduced width bus */
        if (of_machine_is_compatible("marvell,armada380") ||
            of_machine_is_compatible("marvell,armadaxp-98dx3236"))
                drvdata->width /= 2;

        /* configure SBE threshold */
        /* it seems that SBEs are not captured otherwise */
        writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG);

        /* clear cause registers */
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear counter registers */
        writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

        if (edac_mc_add_mc(mci)) {
                edac_mc_free(mci);
                return -EINVAL;
        }

        edac_op_state = EDAC_OPSTATE_POLL;

        return 0;
}

static void axp_mc_remove(struct platform_device *pdev)
{
        struct mem_ctl_info *mci = platform_get_drvdata(pdev);

        edac_mc_del_mc(&pdev->dev);
        edac_mc_free(mci);
        platform_set_drvdata(pdev, NULL);
}

static struct platform_driver axp_mc_driver = {
        .probe = axp_mc_probe,
        .remove_new = axp_mc_remove,
        .driver = {
                .name = "armada_xp_mc_edac",
                .of_match_table = of_match_ptr(axp_mc_of_match),
        },
};

/************************ EDAC Device (L2 Cache) ***************************/

struct aurora_l2_drvdata {
        void __iomem *base;

        char msg[128];

        /* error injection via debugfs */
        uint32_t inject_addr;
        uint32_t inject_mask;
        uint8_t inject_ctl;

        struct dentry *debugfs;
};

#ifdef CONFIG_EDAC_DEBUG
static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata)
{
        drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK;
        drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK;
        writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
        writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG);
        writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
}
#endif
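
/*
 * Error injection (CONFIG_EDAC_DEBUG only): user space writes inject_addr,
 * inject_mask and inject_ctl through debugfs; the values are re-armed from
 * aurora_l2_poll() after every check, so injected errors show up on the next
 * polling interval.
 */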

static void aurora_l2_check(struct edac_device_ctl_info *dci)
{
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;
        uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap;
        unsigned int cnt_ce, cnt_ue;
        char *msg = drvdata->msg;
        size_t size = sizeof(drvdata->msg);
        size_t len = 0;

        cnt = readl(drvdata->base + AURORA_ERR_CNT_REG);
        attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG);
        addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG);
        way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG);

        cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET;
        cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET;
        /* clear error counter registers */
        if (cnt_ce || cnt_ue)
                writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);

        if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID))
                goto clear_remaining;

        src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF;
        if (src <= 3)
                len += scnprintf(msg+len, size-len, "src=CPU%d ", src);
        else
                len += scnprintf(msg+len, size-len, "src=IO ");

        txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF;
        switch (txn) {
        case 0:
                len += scnprintf(msg+len, size-len, "txn=Data-Read ");
                break;
        case 1:
                len += scnprintf(msg+len, size-len, "txn=Isn-Read ");
                break;
        case 2:
                len += scnprintf(msg+len, size-len, "txn=Clean-Flush ");
                break;
        case 3:
                len += scnprintf(msg+len, size-len, "txn=Eviction ");
                break;
        case 4:
                len += scnprintf(msg+len, size-len,
                                 "txn=Read-Modify-Write ");
                break;
        }

        err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF;
        switch (err) {
        case 0:
                len += scnprintf(msg+len, size-len, "err=CorrECC ");
                break;
        case 1:
                len += scnprintf(msg+len, size-len, "err=UnCorrECC ");
                break;
        case 2:
                len += scnprintf(msg+len, size-len, "err=TagParity ");
                break;
        }

        len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
        len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
        len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);

        /* clear error capture registers */
        writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);

        if (err) {
                /* UnCorrECC or TagParity */
                if (cnt_ue)
                        cnt_ue--;
                edac_device_handle_ue(dci, 0, 0, drvdata->msg);
        } else {
                if (cnt_ce)
                        cnt_ce--;
                edac_device_handle_ce(dci, 0, 0, drvdata->msg);
        }

clear_remaining:
        /* report remaining errors */
        while (cnt_ue--)
                edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)");
        while (cnt_ce--)
                edac_device_handle_ce(dci, 0, 0, "details unavailable (multiple errors)");
}

static void aurora_l2_poll(struct edac_device_ctl_info *dci)
{
#ifdef CONFIG_EDAC_DEBUG
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;
#endif

        aurora_l2_check(dci);
#ifdef CONFIG_EDAC_DEBUG
        aurora_l2_inject(drvdata);
#endif
}

static const struct of_device_id aurora_l2_of_match[] = {
        {.compatible = "marvell,aurora-system-cache",},
        {},
};
MODULE_DEVICE_TABLE(of, aurora_l2_of_match);

static int aurora_l2_probe(struct platform_device *pdev)
{
        struct aurora_l2_drvdata *drvdata;
        struct edac_device_ctl_info *dci;
        const struct of_device_id *id;
        uint32_t l2x0_aux_ctrl;
        void __iomem *base;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base)) {
                dev_err(&pdev->dev, "Unable to map regs\n");
                return PTR_ERR(base);
        }

        l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
        if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
                dev_warn(&pdev->dev, "tag parity is not enabled\n");
        if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
                dev_warn(&pdev->dev, "data ECC is not enabled\n");
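
        /*
         * One controller instance named "cpu" with a single block; the block
         * name is formed from "L" plus the offset value 2, so errors should
         * be attributed to "L2" in sysfs (assumption based on the
         * edac_device_alloc_ctl_info() arguments below).
         */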

        dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
                                         "cpu", 1, "L", 1, 2, 0);
        if (!dci)
                return -ENOMEM;

        drvdata = dci->pvt_info;
        drvdata->base = base;
        dci->dev = &pdev->dev;
        platform_set_drvdata(pdev, dci);

        id = of_match_device(aurora_l2_of_match, &pdev->dev);
        dci->edac_check = aurora_l2_poll;
        dci->mod_name = pdev->dev.driver->name;
        dci->ctl_name = id ? id->compatible : "unknown";
        dci->dev_name = dev_name(&pdev->dev);

        /* clear registers */
        writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
        writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);

        if (edac_device_add_device(dci)) {
                edac_device_free_ctl_info(dci);
                return -EINVAL;
        }

#ifdef CONFIG_EDAC_DEBUG
        drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev));
        if (drvdata->debugfs) {
                edac_debugfs_create_x32("inject_addr", 0644,
                                        drvdata->debugfs,
                                        &drvdata->inject_addr);
                edac_debugfs_create_x32("inject_mask", 0644,
                                        drvdata->debugfs,
                                        &drvdata->inject_mask);
                edac_debugfs_create_x8("inject_ctl", 0644,
                                       drvdata->debugfs, &drvdata->inject_ctl);
        }
#endif

        return 0;
}

static void aurora_l2_remove(struct platform_device *pdev)
{
        struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;

        edac_debugfs_remove_recursive(drvdata->debugfs);
#endif
        edac_device_del_device(&pdev->dev);
        edac_device_free_ctl_info(dci);
        platform_set_drvdata(pdev, NULL);
}

static struct platform_driver aurora_l2_driver = {
        .probe = aurora_l2_probe,
        .remove_new = aurora_l2_remove,
        .driver = {
                .name = "aurora_l2_edac",
                .of_match_table = of_match_ptr(aurora_l2_of_match),
        },
};

/************************ Driver registration ******************************/

static struct platform_driver * const drivers[] = {
        &axp_mc_driver,
        &aurora_l2_driver,
};

static int __init armada_xp_edac_init(void)
{
        int res;
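
        /*
         * If firmware-first (GHES) error reporting is in use on this
         * platform, step aside so that ghes_edac takes precedence.
         */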
        if (ghes_get_devices())
                return -EBUSY;

        /* only polling is supported */
        edac_op_state = EDAC_OPSTATE_POLL;

        res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (res)
                pr_warn("Armada XP EDAC drivers fail to register\n");

        return 0;
}
module_init(armada_xp_edac_init);

static void __exit armada_xp_edac_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(armada_xp_edac_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pengutronix");
MODULE_DESCRIPTION("EDAC Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");