synopsys_edac.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Synopsys DDR ECC Driver
  4. * This driver is based on ppc4xx_edac.c drivers
  5. *
  6. * Copyright (C) 2012 - 2014 Xilinx, Inc.
  7. */
  8. #include <linux/edac.h>
  9. #include <linux/module.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/sizes.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/of.h>
  15. #include "edac_module.h"
/* Number of cs_rows needed per memory controller */
#define SYNPS_EDAC_NR_CSROWS		1

/* Number of channels per memory controller */
#define SYNPS_EDAC_NR_CHANS		1

/* Granularity of reported error in bytes */
#define SYNPS_EDAC_ERR_GRAIN		1

#define SYNPS_EDAC_MSG_SIZE		256

#define SYNPS_EDAC_MOD_STRING		"synps_edac"
#define SYNPS_EDAC_MOD_VER		"1"

/* Synopsys DDR memory controller registers that are relevant to ECC */
#define CTRL_OFST			0x0
#define T_ZQ_OFST			0xA4

/* ECC control register */
#define ECC_CTRL_OFST			0xC4
/* ECC log register */
#define CE_LOG_OFST			0xC8
/* ECC address register */
#define CE_ADDR_OFST			0xCC
/* ECC data[31:0] register */
#define CE_DATA_31_0_OFST		0xD0

/* Uncorrectable error info registers */
#define UE_LOG_OFST			0xDC
#define UE_ADDR_OFST			0xE0
#define UE_DATA_31_0_OFST		0xE4

#define STAT_OFST			0xF0
#define SCRUB_OFST			0xF4

/* Control register bit field definitions */
#define CTRL_BW_MASK			0xC
#define CTRL_BW_SHIFT			2

#define DDRCTL_WDTH_16			1
#define DDRCTL_WDTH_32			0

/* ZQ register bit field definitions */
#define T_ZQ_DDRMODE_MASK		0x2

/* ECC control register bit field definitions */
#define ECC_CTRL_CLR_CE_ERR		0x2
#define ECC_CTRL_CLR_UE_ERR		0x1

/* ECC correctable/uncorrectable error log register definitions */
#define LOG_VALID			0x1
#define CE_LOG_BITPOS_MASK		0xFE
#define CE_LOG_BITPOS_SHIFT		1

/* ECC correctable/uncorrectable error address register definitions */
#define ADDR_COL_MASK			0xFFF
#define ADDR_ROW_MASK			0xFFFF000
#define ADDR_ROW_SHIFT			12
#define ADDR_BANK_MASK			0x70000000
#define ADDR_BANK_SHIFT			28

/* ECC statistic register definitions */
#define STAT_UECNT_MASK			0xFF
#define STAT_CECNT_MASK			0xFF00
#define STAT_CECNT_SHIFT		8

/* ECC scrub register definitions */
#define SCRUB_MODE_MASK			0x7
#define SCRUB_MODE_SECDED		0x4

/* DDR ECC Quirks */
#define DDR_ECC_INTR_SUPPORT		BIT(0)
#define DDR_ECC_DATA_POISON_SUPPORT	BIT(1)
#define DDR_ECC_INTR_SELF_CLEAR		BIT(2)

/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
/* ECC Configuration Registers */
#define ECC_CFG0_OFST			0x70
#define ECC_CFG1_OFST			0x74

/* ECC Status Register */
#define ECC_STAT_OFST			0x78
/* ECC Clear Register */
#define ECC_CLR_OFST			0x7C
/* ECC Error count Register */
#define ECC_ERRCNT_OFST			0x80
/* ECC Corrected Error Address Register */
#define ECC_CEADDR0_OFST		0x84
#define ECC_CEADDR1_OFST		0x88
/* ECC Syndrome Registers */
#define ECC_CSYND0_OFST			0x8C
#define ECC_CSYND1_OFST			0x90
#define ECC_CSYND2_OFST			0x94
/* ECC Bit Mask0 Address Register */
#define ECC_BITMASK0_OFST		0x98
#define ECC_BITMASK1_OFST		0x9C
#define ECC_BITMASK2_OFST		0xA0
/* ECC UnCorrected Error Address Register */
#define ECC_UEADDR0_OFST		0xA4
#define ECC_UEADDR1_OFST		0xA8
/* ECC Syndrome Registers */
#define ECC_UESYND0_OFST		0xAC
#define ECC_UESYND1_OFST		0xB0
#define ECC_UESYND2_OFST		0xB4
/* ECC Poison Address Reg */
#define ECC_POISON0_OFST		0xB8
#define ECC_POISON1_OFST		0xBC

#define ECC_ADDRMAP0_OFFSET		0x200

/* Control register bitfield definitions */
#define ECC_CTRL_BUSWIDTH_MASK		0x3000
#define ECC_CTRL_BUSWIDTH_SHIFT		12
#define ECC_CTRL_CLR_CE_ERRCNT		BIT(2)
#define ECC_CTRL_CLR_UE_ERRCNT		BIT(3)

/* DDR Control Register width definitions */
#define DDRCTL_EWDTH_16			2
#define DDRCTL_EWDTH_32			1
#define DDRCTL_EWDTH_64			0

/* ECC status register definitions */
#define ECC_STAT_UECNT_MASK		0xF0000
#define ECC_STAT_UECNT_SHIFT		16
#define ECC_STAT_CECNT_MASK		0xF00
#define ECC_STAT_CECNT_SHIFT		8
#define ECC_STAT_BITNUM_MASK		0x7F

/* ECC error count register definitions */
#define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
#define ECC_ERRCNT_UECNT_SHIFT		16
#define ECC_ERRCNT_CECNT_MASK		0xFFFF

/* DDR QOS Interrupt register definitions */
#define DDR_QOS_IRQ_STAT_OFST		0x20200
#define DDR_QOSUE_MASK			0x4
#define DDR_QOSCE_MASK			0x2
#define ECC_CE_UE_INTR_MASK		0x6
#define DDR_QOS_IRQ_EN_OFST		0x20208
#define DDR_QOS_IRQ_DB_OFST		0x2020C

/* DDR QOS Interrupt register definitions */
#define DDR_UE_MASK			BIT(9)
#define DDR_CE_MASK			BIT(8)

/* ECC Corrected Error Register Mask and Shifts */
#define ECC_CEADDR0_RW_MASK		0x3FFFF
#define ECC_CEADDR0_RNK_MASK		BIT(24)
#define ECC_CEADDR1_BNKGRP_MASK		0x3000000
#define ECC_CEADDR1_BNKNR_MASK		0x70000
#define ECC_CEADDR1_BLKNR_MASK		0xFFF
#define ECC_CEADDR1_BNKGRP_SHIFT	24
#define ECC_CEADDR1_BNKNR_SHIFT		16

/* ECC Poison register shifts */
#define ECC_POISON0_RANK_SHIFT		24
#define ECC_POISON0_RANK_MASK		BIT(24)
#define ECC_POISON0_COLUMN_SHIFT	0
#define ECC_POISON0_COLUMN_MASK		0xFFF
#define ECC_POISON1_BG_SHIFT		28
#define ECC_POISON1_BG_MASK		0x30000000
#define ECC_POISON1_BANKNR_SHIFT	24
#define ECC_POISON1_BANKNR_MASK		0x7000000
#define ECC_POISON1_ROW_SHIFT		0
#define ECC_POISON1_ROW_MASK		0x3FFFF

/* DDR Memory type defines */
#define MEM_TYPE_DDR3			0x1
#define MEM_TYPE_LPDDR3			0x8
#define MEM_TYPE_DDR2			0x4
#define MEM_TYPE_DDR4			0x10
#define MEM_TYPE_LPDDR4			0x20

/* DDRC Software control register */
#define DDRC_SWCTL			0x320

/* DDRC ECC CE & UE poison mask */
#define ECC_CEPOISON_MASK		0x3
#define ECC_UEPOISON_MASK		0x1

/* DDRC Device config masks */
#define DDRC_MSTR_CFG_MASK		0xC0000000
#define DDRC_MSTR_CFG_SHIFT		30
#define DDRC_MSTR_CFG_X4_MASK		0x0
#define DDRC_MSTR_CFG_X8_MASK		0x1
#define DDRC_MSTR_CFG_X16_MASK		0x2
#define DDRC_MSTR_CFG_X32_MASK		0x3

#define DDR_MAX_ROW_SHIFT		18
#define DDR_MAX_COL_SHIFT		14
#define DDR_MAX_BANK_SHIFT		3
#define DDR_MAX_BANKGRP_SHIFT		2

#define ROW_MAX_VAL_MASK		0xF
#define COL_MAX_VAL_MASK		0xF
#define BANK_MAX_VAL_MASK		0x1F
#define BANKGRP_MAX_VAL_MASK		0x1F
#define RANK_MAX_VAL_MASK		0x1F

/*
 * Per-bit base positions used when decoding the ADDRMAP registers into
 * row/column/bank/bank-group/rank shifts (CONFIG_EDAC_DEBUG poison support).
 */
#define ROW_B0_BASE			6
#define ROW_B1_BASE			7
#define ROW_B2_BASE			8
#define ROW_B3_BASE			9
#define ROW_B4_BASE			10
#define ROW_B5_BASE			11
#define ROW_B6_BASE			12
#define ROW_B7_BASE			13
#define ROW_B8_BASE			14
#define ROW_B9_BASE			15
#define ROW_B10_BASE			16
#define ROW_B11_BASE			17
#define ROW_B12_BASE			18
#define ROW_B13_BASE			19
#define ROW_B14_BASE			20
#define ROW_B15_BASE			21
#define ROW_B16_BASE			22
#define ROW_B17_BASE			23

#define COL_B2_BASE			2
#define COL_B3_BASE			3
#define COL_B4_BASE			4
#define COL_B5_BASE			5
#define COL_B6_BASE			6
#define COL_B7_BASE			7
#define COL_B8_BASE			8
#define COL_B9_BASE			9
#define COL_B10_BASE			10
#define COL_B11_BASE			11
#define COL_B12_BASE			12
#define COL_B13_BASE			13

#define BANK_B0_BASE			2
#define BANK_B1_BASE			3
#define BANK_B2_BASE			4

#define BANKGRP_B0_BASE			2
#define BANKGRP_B1_BASE			3

#define RANK_B0_BASE			6
/**
 * struct ecc_error_info - ECC error log information.
 * @row:	Row number.
 * @col:	Column number.
 * @bank:	Bank number.
 * @bitpos:	Bit position.
 * @data:	Data causing the error.
 * @bankgrpnr:	Bank group number.
 * @blknr:	Block number.
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};
/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:	Correctable error count.
 * @ue_cnt:	Uncorrectable error count.
 * @ceinfo:	Correctable error log information.
 * @ueinfo:	Uncorrectable error log information.
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};
/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:		Base address of the DDR controller.
 * @reglock:		Concurrent CSRs access lock.
 * @message:		Buffer for framing the event specific info.
 * @stat:		ECC status information.
 * @p_data:		Platform data.
 * @ce_cnt:		Correctable Error count.
 * @ue_cnt:		Uncorrectable Error count.
 * @poison_addr:	Data poison address.
 * @row_shift:		Bit shifts for row bit.
 * @col_shift:		Bit shifts for column bit.
 * @bank_shift:		Bit shifts for bank bit.
 * @bankgrp_shift:	Bit shifts for bank group bit.
 * @rank_shift:		Bit shifts for rank bit.
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	spinlock_t reglock;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	/* Fields below exist only for the data-poison debug interface. */
	ulong poison_addr;
	u32 row_shift[18];
	u32 col_shift[14];
	u32 bank_shift[3];
	u32 bankgrp_shift[2];
	u32 rank_shift[1];
#endif
};
/**
 * struct synps_platform_data - synps platform data structure.
 * @get_error_info:	Get EDAC error info.
 * @get_mtype:		Get mtype.
 * @get_dtype:		Get dtype.
 * @get_ecc_state:	Get ECC state.
 * @get_mem_info:	Get EDAC memory info.
 * @quirks:		To differentiate IPs.
 */
struct synps_platform_data {
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
	bool (*get_ecc_state)(void __iomem *base);
#ifdef CONFIG_EDAC_DEBUG
	u64 (*get_mem_info)(struct synps_edac_priv *priv);
#endif
	int quirks;
};
/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Snapshot the Zynq DDRC CE/UE counters and, when the corresponding log
 * registers are valid, decode the error address/data into priv->stat.
 * The logged errors are cleared in hardware before returning.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* Decode the CE log only when the hardware marks it valid. */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/*
	 * Pulse the clear bits: write them set, then write zero so the next
	 * error can be latched again.
	 */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
  345. #ifdef CONFIG_EDAC_DEBUG
  346. /**
  347. * zynqmp_get_mem_info - Get the current memory info.
  348. * @priv: DDR memory controller private instance data.
  349. *
  350. * Return: host interface address.
  351. */
  352. static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv)
  353. {
  354. u64 hif_addr = 0, linear_addr;
  355. linear_addr = priv->poison_addr;
  356. if (linear_addr >= SZ_32G)
  357. linear_addr = linear_addr - SZ_32G + SZ_2G;
  358. hif_addr = linear_addr >> 3;
  359. return hif_addr;
  360. }
  361. #endif
/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the ZynqMP DDRC error counters, decode CE/UE address and syndrome
 * information into priv->stat, then clear the error state under reglock
 * (the clear register is shared with the interrupt enable bits).
 *
 * Return: one if there is no error otherwise returns zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval;
	unsigned long flags;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	/* Decode corrected-error address: row, bank, bank group, block. */
	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));

ue_err:
	if (!p->ue_cnt)
		goto out;

	/* Decode uncorrected-error address the same way (shared masks). */
	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);

out:
	/*
	 * Read-modify-write under reglock: ECC_CLR also holds the interrupt
	 * enable bits written by enable_intr()/disable_intr(), so the other
	 * bits must be preserved while setting the clear bits.
	 */
	spin_lock_irqsave(&priv->reglock, flags);

	clearval = readl(base + ECC_CLR_OFST) |
		   ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
		   ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);

	spin_unlock_irqrestore(&priv->reglock, flags);

	return 0;
}
/**
 * handle_error - Handle Correctable and Uncorrectable errors.
 * @mci:	EDAC memory controller instance.
 * @p:		Synopsys ECC status structure.
 *
 * Format the decoded error information into priv->message and report it to
 * the EDAC core. The message layout depends on the IP: the interrupt-capable
 * controllers (DDR_ECC_INTR_SUPPORT) log bank group / block instead of the
 * column. The status snapshot is zeroed afterwards so stale data is never
 * re-reported.
 */
static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
{
	struct synps_edac_priv *priv = mci->pvt_info;
	struct ecc_error_info *pinf;

	if (p->ce_cnt) {
		pinf = &p->ceinfo;
		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
				 "CE", pinf->row, pinf->bank,
				 pinf->bankgrpnr, pinf->blknr,
				 pinf->bitpos, pinf->data);
		} else {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
				 "CE", pinf->row, pinf->bank, pinf->col,
				 pinf->bitpos, pinf->data);
		}

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	if (p->ue_cnt) {
		pinf = &p->ueinfo;
		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
				 "UE", pinf->row, pinf->bank,
				 pinf->bankgrpnr, pinf->blknr);
		} else {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type :%s Row %d Bank %d Col %d ",
				 "UE", pinf->row, pinf->bank, pinf->col);
		}

		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	/* Consume the snapshot so the next poll/IRQ starts clean. */
	memset(p, 0, sizeof(*p));
}
  465. static void enable_intr(struct synps_edac_priv *priv)
  466. {
  467. unsigned long flags;
  468. /* Enable UE/CE Interrupts */
  469. if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
  470. writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
  471. priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
  472. return;
  473. }
  474. spin_lock_irqsave(&priv->reglock, flags);
  475. writel(DDR_UE_MASK | DDR_CE_MASK,
  476. priv->baseaddr + ECC_CLR_OFST);
  477. spin_unlock_irqrestore(&priv->reglock, flags);
  478. }
  479. static void disable_intr(struct synps_edac_priv *priv)
  480. {
  481. unsigned long flags;
  482. /* Disable UE/CE Interrupts */
  483. if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
  484. writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
  485. priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
  486. return;
  487. }
  488. spin_lock_irqsave(&priv->reglock, flags);
  489. writel(0, priv->baseaddr + ECC_CLR_OFST);
  490. spin_unlock_irqrestore(&priv->reglock, flags);
  491. }
/**
 * intr_handler - Interrupt Handler for ECC interrupts.
 * @irq:	IRQ number.
 * @dev_id:	Device ID.
 *
 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
 */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	/*
	 * v3.0 of the controller has the ce/ue bits cleared automatically,
	 * so this condition does not apply.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
		if (!(regval & ECC_CE_UE_INTR_MASK))
			return IRQ_NONE;
	}

	/* A non-zero status means no error was latched - not our interrupt. */
	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);

	/* v3.0 of the controller does not have this register */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);

	return IRQ_HANDLED;
}
  530. /**
  531. * check_errors - Check controller for ECC errors.
  532. * @mci: EDAC memory controller instance.
  533. *
  534. * Check and post ECC errors. Called by the polling thread.
  535. */
  536. static void check_errors(struct mem_ctl_info *mci)
  537. {
  538. const struct synps_platform_data *p_data;
  539. struct synps_edac_priv *priv;
  540. int status;
  541. priv = mci->pvt_info;
  542. p_data = priv->p_data;
  543. status = p_data->get_error_info(priv);
  544. if (status)
  545. return;
  546. priv->ce_cnt += priv->stat.ce_cnt;
  547. priv->ue_cnt += priv->stat.ue_cnt;
  548. handle_error(mci, &priv->stat);
  549. edac_dbg(3, "Total error count CE %d UE %d\n",
  550. priv->ce_cnt, priv->ue_cnt);
  551. }
  552. /**
  553. * zynq_get_dtype - Return the controller memory width.
  554. * @base: DDR memory controller base address.
  555. *
  556. * Get the EDAC device type width appropriate for the current controller
  557. * configuration.
  558. *
  559. * Return: a device type width enumeration.
  560. */
  561. static enum dev_type zynq_get_dtype(const void __iomem *base)
  562. {
  563. enum dev_type dt;
  564. u32 width;
  565. width = readl(base + CTRL_OFST);
  566. width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
  567. switch (width) {
  568. case DDRCTL_WDTH_16:
  569. dt = DEV_X2;
  570. break;
  571. case DDRCTL_WDTH_32:
  572. dt = DEV_X4;
  573. break;
  574. default:
  575. dt = DEV_UNKNOWN;
  576. }
  577. return dt;
  578. }
  579. /**
  580. * zynqmp_get_dtype - Return the controller memory width.
  581. * @base: DDR memory controller base address.
  582. *
  583. * Get the EDAC device type width appropriate for the current controller
  584. * configuration.
  585. *
  586. * Return: a device type width enumeration.
  587. */
  588. static enum dev_type zynqmp_get_dtype(const void __iomem *base)
  589. {
  590. enum dev_type dt;
  591. u32 width;
  592. width = readl(base + CTRL_OFST);
  593. width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
  594. switch (width) {
  595. case DDRCTL_EWDTH_16:
  596. dt = DEV_X2;
  597. break;
  598. case DDRCTL_EWDTH_32:
  599. dt = DEV_X4;
  600. break;
  601. case DDRCTL_EWDTH_64:
  602. dt = DEV_X8;
  603. break;
  604. default:
  605. dt = DEV_UNKNOWN;
  606. }
  607. return dt;
  608. }
  609. /**
  610. * zynq_get_ecc_state - Return the controller ECC enable/disable status.
  611. * @base: DDR memory controller base address.
  612. *
  613. * Get the ECC enable/disable status of the controller.
  614. *
  615. * Return: true if enabled, otherwise false.
  616. */
  617. static bool zynq_get_ecc_state(void __iomem *base)
  618. {
  619. enum dev_type dt;
  620. u32 ecctype;
  621. dt = zynq_get_dtype(base);
  622. if (dt == DEV_UNKNOWN)
  623. return false;
  624. ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
  625. if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
  626. return true;
  627. return false;
  628. }
  629. /**
  630. * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
  631. * @base: DDR memory controller base address.
  632. *
  633. * Get the ECC enable/disable status for the controller.
  634. *
  635. * Return: a ECC status boolean i.e true/false - enabled/disabled.
  636. */
  637. static bool zynqmp_get_ecc_state(void __iomem *base)
  638. {
  639. enum dev_type dt;
  640. u32 ecctype;
  641. dt = zynqmp_get_dtype(base);
  642. if (dt == DEV_UNKNOWN)
  643. return false;
  644. ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
  645. if ((ecctype == SCRUB_MODE_SECDED) &&
  646. ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
  647. return true;
  648. return false;
  649. }
  650. /**
  651. * get_memsize - Read the size of the attached memory device.
  652. *
  653. * Return: the memory size in bytes.
  654. */
  655. static u32 get_memsize(void)
  656. {
  657. struct sysinfo inf;
  658. si_meminfo(&inf);
  659. return inf.totalram * inf.mem_unit;
  660. }
  661. /**
  662. * zynq_get_mtype - Return the controller memory type.
  663. * @base: Synopsys ECC status structure.
  664. *
  665. * Get the EDAC memory type appropriate for the current controller
  666. * configuration.
  667. *
  668. * Return: a memory type enumeration.
  669. */
  670. static enum mem_type zynq_get_mtype(const void __iomem *base)
  671. {
  672. enum mem_type mt;
  673. u32 memtype;
  674. memtype = readl(base + T_ZQ_OFST);
  675. if (memtype & T_ZQ_DDRMODE_MASK)
  676. mt = MEM_DDR3;
  677. else
  678. mt = MEM_DDR2;
  679. return mt;
  680. }
  681. /**
  682. * zynqmp_get_mtype - Returns controller memory type.
  683. * @base: Synopsys ECC status structure.
  684. *
  685. * Get the EDAC memory type appropriate for the current controller
  686. * configuration.
  687. *
  688. * Return: a memory type enumeration.
  689. */
  690. static enum mem_type zynqmp_get_mtype(const void __iomem *base)
  691. {
  692. enum mem_type mt;
  693. u32 memtype;
  694. memtype = readl(base + CTRL_OFST);
  695. if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
  696. mt = MEM_DDR3;
  697. else if (memtype & MEM_TYPE_DDR2)
  698. mt = MEM_RDDR2;
  699. else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
  700. mt = MEM_DDR4;
  701. else
  702. mt = MEM_EMPTY;
  703. return mt;
  704. }
  705. /**
  706. * init_csrows - Initialize the csrow data.
  707. * @mci: EDAC memory controller instance.
  708. *
  709. * Initialize the chip select rows associated with the EDAC memory
  710. * controller instance.
  711. */
  712. static void init_csrows(struct mem_ctl_info *mci)
  713. {
  714. struct synps_edac_priv *priv = mci->pvt_info;
  715. const struct synps_platform_data *p_data;
  716. struct csrow_info *csi;
  717. struct dimm_info *dimm;
  718. u32 size, row;
  719. int j;
  720. p_data = priv->p_data;
  721. for (row = 0; row < mci->nr_csrows; row++) {
  722. csi = mci->csrows[row];
  723. size = get_memsize();
  724. for (j = 0; j < csi->nr_channels; j++) {
  725. dimm = csi->channels[j]->dimm;
  726. dimm->edac_mode = EDAC_SECDED;
  727. dimm->mtype = p_data->get_mtype(priv->baseaddr);
  728. dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
  729. dimm->grain = SYNPS_EDAC_ERR_GRAIN;
  730. dimm->dtype = p_data->get_dtype(priv->baseaddr);
  731. }
  732. }
  733. }
  734. /**
  735. * mc_init - Initialize one driver instance.
  736. * @mci: EDAC memory controller instance.
  737. * @pdev: platform device.
  738. *
  739. * Perform initialization of the EDAC memory controller instance and
  740. * related driver-private data associated with the memory controller the
  741. * instance is bound to.
  742. */
  743. static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
  744. {
  745. struct synps_edac_priv *priv;
  746. mci->pdev = &pdev->dev;
  747. priv = mci->pvt_info;
  748. platform_set_drvdata(pdev, mci);
  749. /* Initialize controller capabilities and configuration */
  750. mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
  751. mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
  752. mci->scrub_cap = SCRUB_HW_SRC;
  753. mci->scrub_mode = SCRUB_NONE;
  754. mci->edac_cap = EDAC_FLAG_SECDED;
  755. mci->ctl_name = "synps_ddr_controller";
  756. mci->dev_name = SYNPS_EDAC_MOD_STRING;
  757. mci->mod_name = SYNPS_EDAC_MOD_VER;
  758. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
  759. edac_op_state = EDAC_OPSTATE_INT;
  760. } else {
  761. edac_op_state = EDAC_OPSTATE_POLL;
  762. mci->edac_check = check_errors;
  763. }
  764. mci->ctl_page_to_phys = NULL;
  765. init_csrows(mci);
  766. }
  767. static int setup_irq(struct mem_ctl_info *mci,
  768. struct platform_device *pdev)
  769. {
  770. struct synps_edac_priv *priv = mci->pvt_info;
  771. int ret, irq;
  772. irq = platform_get_irq(pdev, 0);
  773. if (irq < 0) {
  774. edac_printk(KERN_ERR, EDAC_MC,
  775. "No IRQ %d in DT\n", irq);
  776. return irq;
  777. }
  778. ret = devm_request_irq(&pdev->dev, irq, intr_handler,
  779. 0, dev_name(&pdev->dev), mci);
  780. if (ret < 0) {
  781. edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
  782. return ret;
  783. }
  784. enable_intr(priv);
  785. return 0;
  786. }
/* Zynq DDRC: polling-only controller, no interrupt or poison support. */
static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};
/* ZynqMP DDRC: interrupt driven; data poisoning available for debug builds. */
static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
#ifdef CONFIG_EDAC_DEBUG
	.get_mem_info	= zynqmp_get_mem_info,
#endif
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
/*
 * Generic Synopsys DDRC 3.80a: shares the ZynqMP accessors but its interrupt
 * status bits are self-clearing (DDR_ECC_INTR_SELF_CLEAR).
 */
static const struct synps_platform_data synopsys_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
/* Device-tree match table; .data selects the per-SoC platform hooks. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};

MODULE_DEVICE_TABLE(of, synps_edac_match);
#ifdef CONFIG_EDAC_DEBUG
/* Map the sysfs struct device back to its owning mem_ctl_info. */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
  839. /**
  840. * ddr_poison_setup - Update poison registers.
  841. * @priv: DDR memory controller private instance data.
  842. *
  843. * Update poison registers as per DDR mapping.
  844. * Return: none.
  845. */
  846. static void ddr_poison_setup(struct synps_edac_priv *priv)
  847. {
  848. int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
  849. const struct synps_platform_data *p_data;
  850. int index;
  851. ulong hif_addr = 0;
  852. p_data = priv->p_data;
  853. if (p_data->get_mem_info)
  854. hif_addr = p_data->get_mem_info(priv);
  855. else
  856. hif_addr = priv->poison_addr >> 3;
  857. for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
  858. if (priv->row_shift[index])
  859. row |= (((hif_addr >> priv->row_shift[index]) &
  860. BIT(0)) << index);
  861. else
  862. break;
  863. }
  864. for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
  865. if (priv->col_shift[index] || index < 3)
  866. col |= (((hif_addr >> priv->col_shift[index]) &
  867. BIT(0)) << index);
  868. else
  869. break;
  870. }
  871. for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
  872. if (priv->bank_shift[index])
  873. bank |= (((hif_addr >> priv->bank_shift[index]) &
  874. BIT(0)) << index);
  875. else
  876. break;
  877. }
  878. for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
  879. if (priv->bankgrp_shift[index])
  880. bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
  881. & BIT(0)) << index);
  882. else
  883. break;
  884. }
  885. if (priv->rank_shift[0])
  886. rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);
  887. regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
  888. regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
  889. writel(regval, priv->baseaddr + ECC_POISON0_OFST);
  890. regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
  891. regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
  892. regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
  893. writel(regval, priv->baseaddr + ECC_POISON1_OFST);
  894. }
  895. static ssize_t inject_data_error_show(struct device *dev,
  896. struct device_attribute *mattr,
  897. char *data)
  898. {
  899. struct mem_ctl_info *mci = to_mci(dev);
  900. struct synps_edac_priv *priv = mci->pvt_info;
  901. return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
  902. "Error injection Address: 0x%lx\n\r",
  903. readl(priv->baseaddr + ECC_POISON0_OFST),
  904. readl(priv->baseaddr + ECC_POISON1_OFST),
  905. priv->poison_addr);
  906. }
  907. static ssize_t inject_data_error_store(struct device *dev,
  908. struct device_attribute *mattr,
  909. const char *data, size_t count)
  910. {
  911. struct mem_ctl_info *mci = to_mci(dev);
  912. struct synps_edac_priv *priv = mci->pvt_info;
  913. if (kstrtoul(data, 0, &priv->poison_addr))
  914. return -EINVAL;
  915. ddr_poison_setup(priv);
  916. return count;
  917. }
  918. static ssize_t inject_data_poison_show(struct device *dev,
  919. struct device_attribute *mattr,
  920. char *data)
  921. {
  922. struct mem_ctl_info *mci = to_mci(dev);
  923. struct synps_edac_priv *priv = mci->pvt_info;
  924. return sprintf(data, "Data Poisoning: %s\n\r",
  925. (((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
  926. ? ("Correctable Error") : ("UnCorrectable Error"));
  927. }
  928. static ssize_t inject_data_poison_store(struct device *dev,
  929. struct device_attribute *mattr,
  930. const char *data, size_t count)
  931. {
  932. struct mem_ctl_info *mci = to_mci(dev);
  933. struct synps_edac_priv *priv = mci->pvt_info;
  934. writel(0, priv->baseaddr + DDRC_SWCTL);
  935. if (strncmp(data, "CE", 2) == 0)
  936. writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
  937. else
  938. writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
  939. writel(1, priv->baseaddr + DDRC_SWCTL);
  940. return count;
  941. }
/* sysfs attributes backing the *_show/*_store handlers above. */
static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
  944. static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
  945. {
  946. int rc;
  947. rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
  948. if (rc < 0)
  949. return rc;
  950. rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
  951. if (rc < 0)
  952. return rc;
  953. return 0;
  954. }
  955. static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
  956. {
  957. device_remove_file(&mci->dev, &dev_attr_inject_data_error);
  958. device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
  959. }
/*
 * setup_row_address_map - Decode the row-bit mapping from ADDRMAP5/6/7/9/10/11.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	array of the twelve ADDRMAP register values.
 *
 * Each row_shift[i] records which HIF address bit carries row bit i; a value
 * of 0 for the optional high bits (11-17) means "not mapped" (the register
 * field read back as all-ones, i.e. ROW_MAX_VAL_MASK).
 */
static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 addrmap_row_b2_10;
	int index;

	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
	priv->row_shift[1] = ((addrmap[5] >> 8) &
			ROW_MAX_VAL_MASK) + ROW_B1_BASE;

	/*
	 * ADDRMAP5[23:16] either gives one common offset for row bits 2-10
	 * or, when programmed to the all-ones sentinel, defers to the
	 * per-bit fields in ADDRMAP9/10/11.
	 */
	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;

	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
		for (index = 2; index < 11; index++)
			priv->row_shift[index] = addrmap_row_b2_10 +
				index + ROW_B0_BASE;

	} else {
		priv->row_shift[2] = (addrmap[9] &
				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
		priv->row_shift[3] = ((addrmap[9] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
		priv->row_shift[4] = ((addrmap[9] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
		priv->row_shift[5] = ((addrmap[9] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
		priv->row_shift[6] = (addrmap[10] &
				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
		priv->row_shift[7] = ((addrmap[10] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
		priv->row_shift[8] = ((addrmap[10] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
		priv->row_shift[9] = ((addrmap[10] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
		priv->row_shift[10] = (addrmap[11] &
				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
	}

	/* Row bits 11-17 are individually optional (all-ones field => unused). */
	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
}
/*
 * setup_column_address_map - Decode the column-bit mapping from ADDRMAP2/3/4.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	array of the twelve ADDRMAP register values.
 *
 * col_shift[i] records the HIF address bit that carries column bit i (0 for
 * unmapped optional bits).  The mapping of the high column bits depends on
 * the effective bus width and on whether the memory is LPDDR3, which is why
 * the branches below differ only in which register field feeds which index.
 * Finally, for reduced bus widths the whole map is shifted so that the
 * lowest column bits become unused.
 */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	/* Bus-width and memory-type fields live in the controller CTRL reg. */
	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 always map straight through. */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);

	if (width == DDRCTL_EWDTH_64) {
		/* Full-width bus: high column bits come from ADDRMAP4. */
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		/* Half-width bus: high column bits shift down by one field. */
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		/* Quarter-width bus. */
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	/*
	 * For narrow buses, slide the map up by the width code so the lowest
	 * column positions are vacated (NOTE(review): relies on the
	 * DDRCTL_EWDTH_* encodings being the shift distance — confirm).
	 */
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}
}
  1115. static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
  1116. {
  1117. priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
  1118. priv->bank_shift[1] = ((addrmap[1] >> 8) &
  1119. BANK_MAX_VAL_MASK) + BANK_B1_BASE;
  1120. priv->bank_shift[2] = (((addrmap[1] >> 16) &
  1121. BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
  1122. (((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
  1123. BANK_B2_BASE);
  1124. }
  1125. static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
  1126. {
  1127. priv->bankgrp_shift[0] = (addrmap[8] &
  1128. BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
  1129. priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
  1130. BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
  1131. & BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
  1132. }
  1133. static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
  1134. {
  1135. priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
  1136. RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
  1137. RANK_MAX_VAL_MASK) + RANK_B0_BASE);
  1138. }
  1139. /**
  1140. * setup_address_map - Set Address Map by querying ADDRMAP registers.
  1141. * @priv: DDR memory controller private instance data.
  1142. *
  1143. * Set Address Map by querying ADDRMAP registers.
  1144. *
  1145. * Return: none.
  1146. */
  1147. static void setup_address_map(struct synps_edac_priv *priv)
  1148. {
  1149. u32 addrmap[12];
  1150. int index;
  1151. for (index = 0; index < 12; index++) {
  1152. u32 addrmap_offset;
  1153. addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
  1154. addrmap[index] = readl(priv->baseaddr + addrmap_offset);
  1155. }
  1156. setup_row_address_map(priv, addrmap);
  1157. setup_column_address_map(priv, addrmap);
  1158. setup_bank_address_map(priv, addrmap);
  1159. setup_bg_address_map(priv, addrmap);
  1160. setup_rank_address_map(priv, addrmap);
  1161. }
  1162. #endif /* CONFIG_EDAC_DEBUG */
  1163. /**
  1164. * mc_probe - Check controller and bind driver.
  1165. * @pdev: platform device.
  1166. *
  1167. * Probe a specific controller instance for binding with the driver.
  1168. *
  1169. * Return: 0 if the controller instance was successfully bound to the
  1170. * driver; otherwise, < 0 on error.
  1171. */
  1172. static int mc_probe(struct platform_device *pdev)
  1173. {
  1174. const struct synps_platform_data *p_data;
  1175. struct edac_mc_layer layers[2];
  1176. struct synps_edac_priv *priv;
  1177. struct mem_ctl_info *mci;
  1178. void __iomem *baseaddr;
  1179. int rc;
  1180. baseaddr = devm_platform_ioremap_resource(pdev, 0);
  1181. if (IS_ERR(baseaddr))
  1182. return PTR_ERR(baseaddr);
  1183. p_data = of_device_get_match_data(&pdev->dev);
  1184. if (!p_data)
  1185. return -ENODEV;
  1186. if (!p_data->get_ecc_state(baseaddr)) {
  1187. edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
  1188. return -ENXIO;
  1189. }
  1190. layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
  1191. layers[0].size = SYNPS_EDAC_NR_CSROWS;
  1192. layers[0].is_virt_csrow = true;
  1193. layers[1].type = EDAC_MC_LAYER_CHANNEL;
  1194. layers[1].size = SYNPS_EDAC_NR_CHANS;
  1195. layers[1].is_virt_csrow = false;
  1196. mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
  1197. sizeof(struct synps_edac_priv));
  1198. if (!mci) {
  1199. edac_printk(KERN_ERR, EDAC_MC,
  1200. "Failed memory allocation for mc instance\n");
  1201. return -ENOMEM;
  1202. }
  1203. priv = mci->pvt_info;
  1204. priv->baseaddr = baseaddr;
  1205. priv->p_data = p_data;
  1206. spin_lock_init(&priv->reglock);
  1207. mc_init(mci, pdev);
  1208. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
  1209. rc = setup_irq(mci, pdev);
  1210. if (rc)
  1211. goto free_edac_mc;
  1212. }
  1213. rc = edac_mc_add_mc(mci);
  1214. if (rc) {
  1215. edac_printk(KERN_ERR, EDAC_MC,
  1216. "Failed to register with EDAC core\n");
  1217. goto free_edac_mc;
  1218. }
  1219. #ifdef CONFIG_EDAC_DEBUG
  1220. if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
  1221. rc = edac_create_sysfs_attributes(mci);
  1222. if (rc) {
  1223. edac_printk(KERN_ERR, EDAC_MC,
  1224. "Failed to create sysfs entries\n");
  1225. goto free_edac_mc;
  1226. }
  1227. }
  1228. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
  1229. setup_address_map(priv);
  1230. #endif
  1231. /*
  1232. * Start capturing the correctable and uncorrectable errors. A write of
  1233. * 0 starts the counters.
  1234. */
  1235. if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
  1236. writel(0x0, baseaddr + ECC_CTRL_OFST);
  1237. return rc;
  1238. free_edac_mc:
  1239. edac_mc_free(mci);
  1240. return rc;
  1241. }
  1242. /**
  1243. * mc_remove - Unbind driver from controller.
  1244. * @pdev: Platform device.
  1245. *
  1246. * Return: Unconditionally 0
  1247. */
  1248. static void mc_remove(struct platform_device *pdev)
  1249. {
  1250. struct mem_ctl_info *mci = platform_get_drvdata(pdev);
  1251. struct synps_edac_priv *priv = mci->pvt_info;
  1252. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
  1253. disable_intr(priv);
  1254. #ifdef CONFIG_EDAC_DEBUG
  1255. if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
  1256. edac_remove_sysfs_attributes(mci);
  1257. #endif
  1258. edac_mc_del_mc(&pdev->dev);
  1259. edac_mc_free(mci);
  1260. }
/* Platform driver glue; matching is driven by the DT table above. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove_new = mc_remove,
};

module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");