// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale Memory Controller kernel module
 *
 * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
 * ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
 * split out from mpc85xx_edac EDAC driver.
 *
 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/sysfs.h>

#include "edac_module.h"
#include "fsl_ddr_edac.h"
  28. #define EDAC_MOD_STR "fsl_ddr_edac"
/* Next EDAC controller index handed out (incremented per probe). */
static int edac_mc_idx;

/* Controller state saved in fsl_mc_err_probe() and restored in
 * fsl_mc_err_remove(): error-disable mask and SBE threshold field. */
static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;

/* Register endianness, from the "little-endian" DT property at probe;
 * defaults to big endian when the property is absent. */
static bool little_endian;
  33. static inline u32 ddr_in32(void __iomem *addr)
  34. {
  35. return little_endian ? ioread32(addr) : ioread32be(addr);
  36. }
  37. static inline void ddr_out32(void __iomem *addr, u32 value)
  38. {
  39. if (little_endian)
  40. iowrite32(value, addr);
  41. else
  42. iowrite32be(value, addr);
  43. }
  44. #ifdef CONFIG_EDAC_DEBUG
  45. /************************ MC SYSFS parts ***********************************/
  46. #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
  47. static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
  48. struct device_attribute *mattr,
  49. char *data)
  50. {
  51. struct mem_ctl_info *mci = to_mci(dev);
  52. struct fsl_mc_pdata *pdata = mci->pvt_info;
  53. return sprintf(data, "0x%08x",
  54. ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
  55. }
  56. static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
  57. struct device_attribute *mattr,
  58. char *data)
  59. {
  60. struct mem_ctl_info *mci = to_mci(dev);
  61. struct fsl_mc_pdata *pdata = mci->pvt_info;
  62. return sprintf(data, "0x%08x",
  63. ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
  64. }
  65. static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
  66. struct device_attribute *mattr,
  67. char *data)
  68. {
  69. struct mem_ctl_info *mci = to_mci(dev);
  70. struct fsl_mc_pdata *pdata = mci->pvt_info;
  71. return sprintf(data, "0x%08x",
  72. ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
  73. }
  74. static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
  75. struct device_attribute *mattr,
  76. const char *data, size_t count)
  77. {
  78. struct mem_ctl_info *mci = to_mci(dev);
  79. struct fsl_mc_pdata *pdata = mci->pvt_info;
  80. unsigned long val;
  81. int rc;
  82. if (isdigit(*data)) {
  83. rc = kstrtoul(data, 0, &val);
  84. if (rc)
  85. return rc;
  86. ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
  87. return count;
  88. }
  89. return 0;
  90. }
  91. static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
  92. struct device_attribute *mattr,
  93. const char *data, size_t count)
  94. {
  95. struct mem_ctl_info *mci = to_mci(dev);
  96. struct fsl_mc_pdata *pdata = mci->pvt_info;
  97. unsigned long val;
  98. int rc;
  99. if (isdigit(*data)) {
  100. rc = kstrtoul(data, 0, &val);
  101. if (rc)
  102. return rc;
  103. ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
  104. return count;
  105. }
  106. return 0;
  107. }
  108. static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
  109. struct device_attribute *mattr,
  110. const char *data, size_t count)
  111. {
  112. struct mem_ctl_info *mci = to_mci(dev);
  113. struct fsl_mc_pdata *pdata = mci->pvt_info;
  114. unsigned long val;
  115. int rc;
  116. if (isdigit(*data)) {
  117. rc = kstrtoul(data, 0, &val);
  118. if (rc)
  119. return rc;
  120. ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
  121. return count;
  122. }
  123. return 0;
  124. }
/* Debug injection knobs: world-readable, root-writable. */
static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
#endif /* CONFIG_EDAC_DEBUG */
/*
 * Sysfs attributes attached to the MC device; the injection attributes
 * exist only when CONFIG_EDAC_DEBUG is set.
 */
static struct attribute *fsl_ddr_dev_attrs[] = {
#ifdef CONFIG_EDAC_DEBUG
	&dev_attr_inject_data_hi.attr,
	&dev_attr_inject_data_lo.attr,
	&dev_attr_inject_ctrl.attr,
#endif
	NULL
};

ATTRIBUTE_GROUPS(fsl_ddr_dev);
/**************************** MC Err device ***************************/
/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual. Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
 * below correspond to Freescale's manuals.
 *
 * Entry 2*i is the mask over data bits [0:31] and entry 2*i+1 the mask
 * over data bits [32:63] contributing to syndrome bit (7 - i).
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,  /* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,  /* Syndrome bit 0 */
};
  160. /*
  161. * Calculate the correct ECC value for a 64-bit value specified by high:low
  162. */
  163. static u8 calculate_ecc(u32 high, u32 low)
  164. {
  165. u32 mask_low;
  166. u32 mask_high;
  167. int bit_cnt;
  168. u8 ecc = 0;
  169. int i;
  170. int j;
  171. for (i = 0; i < 8; i++) {
  172. mask_high = ecc_table[i * 2];
  173. mask_low = ecc_table[i * 2 + 1];
  174. bit_cnt = 0;
  175. for (j = 0; j < 32; j++) {
  176. if ((mask_high >> j) & 1)
  177. bit_cnt ^= (high >> j) & 1;
  178. if ((mask_low >> j) & 1)
  179. bit_cnt ^= (low >> j) & 1;
  180. }
  181. ecc |= bit_cnt << i;
  182. }
  183. return ecc;
  184. }
  185. /*
  186. * Create the syndrome code which is generated if the data line specified by
  187. * 'bit' failed. Eg generate an 8-bit codes seen in Table 8-55 in the MPC8641
  188. * User's Manual and 9-61 in the MPC8572 User's Manual.
  189. */
  190. static u8 syndrome_from_bit(unsigned int bit) {
  191. int i;
  192. u8 syndrome = 0;
  193. /*
  194. * Cycle through the upper or lower 32-bit portion of each value in
  195. * ecc_table depending on if 'bit' is in the upper or lower half of
  196. * 64-bit data.
  197. */
  198. for (i = bit < 32; i < 16; i += 2)
  199. syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
  200. return syndrome;
  201. }
  202. /*
  203. * Decode data and ecc syndrome to determine what went wrong
  204. * Note: This can only decode single-bit errors
  205. */
  206. static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
  207. int *bad_data_bit, int *bad_ecc_bit)
  208. {
  209. int i;
  210. u8 syndrome;
  211. *bad_data_bit = -1;
  212. *bad_ecc_bit = -1;
  213. /*
  214. * Calculate the ECC of the captured data and XOR it with the captured
  215. * ECC to find an ECC syndrome value we can search for
  216. */
  217. syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
  218. /* Check if a data line is stuck... */
  219. for (i = 0; i < 64; i++) {
  220. if (syndrome == syndrome_from_bit(i)) {
  221. *bad_data_bit = i;
  222. return;
  223. }
  224. }
  225. /* If data is correct, check ECC bits for errors... */
  226. for (i = 0; i < 8; i++) {
  227. if ((syndrome >> i) & 0x1) {
  228. *bad_ecc_bit = i;
  229. return;
  230. }
  231. }
  232. }
/* Combine two 32-bit register reads into one 64-bit value. */
#define make64(high, low) (((u64)(high) << 32) | (low))

/*
 * Core error handler, called from the poll path (mci->edac_check) and
 * from the ISR: read the error-detect register, decode any captured ECC
 * error, log the details and report the event to the EDAC core, then
 * write the latched bits back to clear them (the probe path's "clear
 * all error bits" write of ~0 shows the register is write-to-clear).
 */
static void fsl_mc_check(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u64 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
	if (!err_detect)
		return;

	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
		      err_detect);

	/* no more processing if not ECC bit errors */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);

	/* Mask off appropriate bits of syndrome based on bus width */
	bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
		     DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = make64(
		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
	pfn = err_addr >> PAGE_SHIFT;

	/* Find which chip-select row covers the faulting page frame. */
	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
	cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		u64 cap = (u64)cap_high << 32 | cap_low;
		u32 s = syndrome;

		sbe_ecc_decode(cap_high, cap_low, syndrome,
			       &bad_data_bit, &bad_ecc_bit);

		if (bad_data_bit >= 0) {
			fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
			/* flip the identified bit back to show the expected data */
			cap ^= 1ULL << bad_data_bit;
		}

		if (bad_ecc_bit >= 0) {
			fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
			/* likewise for the expected ECC byte */
			s ^= 1 << bad_ecc_bit;
		}

		fsl_mc_printk(mci, KERN_ERR,
			      "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			      upper_32_bits(cap), lower_32_bits(cap), s);
	}

	fsl_mc_printk(mci, KERN_ERR,
		      "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
		      cap_high, cap_low, syndrome);
	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	/* acknowledge the handled error bits */
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
}
  318. static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
  319. {
  320. struct mem_ctl_info *mci = dev_id;
  321. struct fsl_mc_pdata *pdata = mci->pvt_info;
  322. u32 err_detect;
  323. err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
  324. if (!err_detect)
  325. return IRQ_NONE;
  326. fsl_mc_check(mci);
  327. return IRQ_HANDLED;
  328. }
  329. static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
  330. {
  331. struct fsl_mc_pdata *pdata = mci->pvt_info;
  332. struct csrow_info *csrow;
  333. struct dimm_info *dimm;
  334. u32 sdram_ctl;
  335. u32 sdtype;
  336. enum mem_type mtype;
  337. u32 cs_bnds;
  338. int index;
  339. sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
  340. sdtype = sdram_ctl & DSC_SDTYPE_MASK;
  341. if (sdram_ctl & DSC_RD_EN) {
  342. switch (sdtype) {
  343. case 0x02000000:
  344. mtype = MEM_RDDR;
  345. break;
  346. case 0x03000000:
  347. mtype = MEM_RDDR2;
  348. break;
  349. case 0x07000000:
  350. mtype = MEM_RDDR3;
  351. break;
  352. case 0x05000000:
  353. mtype = MEM_RDDR4;
  354. break;
  355. default:
  356. mtype = MEM_UNKNOWN;
  357. break;
  358. }
  359. } else {
  360. switch (sdtype) {
  361. case 0x02000000:
  362. mtype = MEM_DDR;
  363. break;
  364. case 0x03000000:
  365. mtype = MEM_DDR2;
  366. break;
  367. case 0x07000000:
  368. mtype = MEM_DDR3;
  369. break;
  370. case 0x05000000:
  371. mtype = MEM_DDR4;
  372. break;
  373. default:
  374. mtype = MEM_UNKNOWN;
  375. break;
  376. }
  377. }
  378. for (index = 0; index < mci->nr_csrows; index++) {
  379. u32 start;
  380. u32 end;
  381. csrow = mci->csrows[index];
  382. dimm = csrow->channels[0]->dimm;
  383. cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
  384. (index * FSL_MC_CS_BNDS_OFS));
  385. start = (cs_bnds & 0xffff0000) >> 16;
  386. end = (cs_bnds & 0x0000ffff);
  387. if (start == end)
  388. continue; /* not populated */
  389. start <<= (24 - PAGE_SHIFT);
  390. end <<= (24 - PAGE_SHIFT);
  391. end |= (1 << (24 - PAGE_SHIFT)) - 1;
  392. csrow->first_page = start;
  393. csrow->last_page = end;
  394. dimm->nr_pages = end + 1 - start;
  395. dimm->grain = 8;
  396. dimm->mtype = mtype;
  397. dimm->dtype = DEV_UNKNOWN;
  398. if (sdram_ctl & DSC_X32_EN)
  399. dimm->dtype = DEV_X32;
  400. dimm->edac_mode = EDAC_SECDED;
  401. }
  402. }
  403. int fsl_mc_err_probe(struct platform_device *op)
  404. {
  405. struct mem_ctl_info *mci;
  406. struct edac_mc_layer layers[2];
  407. struct fsl_mc_pdata *pdata;
  408. struct resource r;
  409. u32 sdram_ctl;
  410. int res;
  411. if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
  412. return -ENOMEM;
  413. layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
  414. layers[0].size = 4;
  415. layers[0].is_virt_csrow = true;
  416. layers[1].type = EDAC_MC_LAYER_CHANNEL;
  417. layers[1].size = 1;
  418. layers[1].is_virt_csrow = false;
  419. mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
  420. sizeof(*pdata));
  421. if (!mci) {
  422. devres_release_group(&op->dev, fsl_mc_err_probe);
  423. return -ENOMEM;
  424. }
  425. pdata = mci->pvt_info;
  426. pdata->name = "fsl_mc_err";
  427. mci->pdev = &op->dev;
  428. pdata->edac_idx = edac_mc_idx++;
  429. dev_set_drvdata(mci->pdev, mci);
  430. mci->ctl_name = pdata->name;
  431. mci->dev_name = pdata->name;
  432. /*
  433. * Get the endianness of DDR controller registers.
  434. * Default is big endian.
  435. */
  436. little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
  437. res = of_address_to_resource(op->dev.of_node, 0, &r);
  438. if (res) {
  439. pr_err("%s: Unable to get resource for MC err regs\n",
  440. __func__);
  441. goto err;
  442. }
  443. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  444. pdata->name)) {
  445. pr_err("%s: Error while requesting mem region\n",
  446. __func__);
  447. res = -EBUSY;
  448. goto err;
  449. }
  450. pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  451. if (!pdata->mc_vbase) {
  452. pr_err("%s: Unable to setup MC err regs\n", __func__);
  453. res = -ENOMEM;
  454. goto err;
  455. }
  456. sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
  457. if (!(sdram_ctl & DSC_ECC_EN)) {
  458. /* no ECC */
  459. pr_warn("%s: No ECC DIMMs discovered\n", __func__);
  460. res = -ENODEV;
  461. goto err;
  462. }
  463. edac_dbg(3, "init mci\n");
  464. mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
  465. MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
  466. MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
  467. MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
  468. mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
  469. mci->edac_cap = EDAC_FLAG_SECDED;
  470. mci->mod_name = EDAC_MOD_STR;
  471. if (edac_op_state == EDAC_OPSTATE_POLL)
  472. mci->edac_check = fsl_mc_check;
  473. mci->ctl_page_to_phys = NULL;
  474. mci->scrub_mode = SCRUB_SW_SRC;
  475. fsl_ddr_init_csrows(mci);
  476. /* store the original error disable bits */
  477. orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
  478. ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);
  479. /* clear all error bits */
  480. ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
  481. res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
  482. if (res) {
  483. edac_dbg(3, "failed edac_mc_add_mc()\n");
  484. goto err;
  485. }
  486. if (edac_op_state == EDAC_OPSTATE_INT) {
  487. ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
  488. DDR_EIE_MBEE | DDR_EIE_SBEE);
  489. /* store the original error management threshold */
  490. orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
  491. FSL_MC_ERR_SBE) & 0xff0000;
  492. /* set threshold to 1 error per interrupt */
  493. ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);
  494. /* register interrupts */
  495. pdata->irq = platform_get_irq(op, 0);
  496. res = devm_request_irq(&op->dev, pdata->irq,
  497. fsl_mc_isr,
  498. IRQF_SHARED,
  499. "[EDAC] MC err", mci);
  500. if (res < 0) {
  501. pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
  502. __func__, pdata->irq);
  503. res = -ENODEV;
  504. goto err2;
  505. }
  506. pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
  507. pdata->irq);
  508. }
  509. devres_remove_group(&op->dev, fsl_mc_err_probe);
  510. edac_dbg(3, "success\n");
  511. pr_info(EDAC_MOD_STR " MC err registered\n");
  512. return 0;
  513. err2:
  514. edac_mc_del_mc(&op->dev);
  515. err:
  516. devres_release_group(&op->dev, fsl_mc_err_probe);
  517. edac_mc_free(mci);
  518. return res;
  519. }
  520. void fsl_mc_err_remove(struct platform_device *op)
  521. {
  522. struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
  523. struct fsl_mc_pdata *pdata = mci->pvt_info;
  524. edac_dbg(0, "\n");
  525. if (edac_op_state == EDAC_OPSTATE_INT) {
  526. ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
  527. }
  528. ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
  529. orig_ddr_err_disable);
  530. ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
  531. edac_mc_del_mc(&op->dev);
  532. edac_mc_free(mci);
  533. }