versal_edac.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Versal memory controller driver
 * Copyright (C) 2023 Advanced Micro Devices, Inc.
 */
#include <linux/bitfield.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/firmware/xlnx-event-manager.h>

#include "edac_module.h"

/* Granularity of reported error in bytes */
#define XDDR_EDAC_ERR_GRAIN 1

#define XDDR_EDAC_MSG_SIZE 256
#define EVENT 2

#define XDDR_PCSR_OFFSET 0xC
#define XDDR_ISR_OFFSET 0x14
#define XDDR_IRQ_EN_OFFSET 0x20
#define XDDR_IRQ1_EN_OFFSET 0x2C
#define XDDR_IRQ_DIS_OFFSET 0x24
#define XDDR_IRQ_CE_MASK GENMASK(18, 15)
#define XDDR_IRQ_UE_MASK GENMASK(14, 11)

#define XDDR_REG_CONFIG0_OFFSET 0x258
#define XDDR_REG_CONFIG0_BUS_WIDTH_MASK GENMASK(19, 18)
#define XDDR_REG_CONFIG0_NUM_CHANS_MASK BIT(17)
#define XDDR_REG_CONFIG0_NUM_RANKS_MASK GENMASK(15, 14)
#define XDDR_REG_CONFIG0_SIZE_MASK GENMASK(10, 8)

#define XDDR_REG_PINOUT_OFFSET 0x25C
#define XDDR_REG_PINOUT_ECC_EN_MASK GENMASK(7, 5)

#define ECCW0_FLIP_CTRL 0x109C
#define ECCW0_FLIP0_OFFSET 0x10A0
#define ECCW0_FLIP0_BITS 31
#define ECCW0_FLIP1_OFFSET 0x10A4
#define ECCW1_FLIP_CTRL 0x10AC
#define ECCW1_FLIP0_OFFSET 0x10B0
#define ECCW1_FLIP1_OFFSET 0x10B4

#define ECCR0_CERR_STAT_OFFSET 0x10BC
#define ECCR0_CE_ADDR_LO_OFFSET 0x10C0
#define ECCR0_CE_ADDR_HI_OFFSET 0x10C4
#define ECCR0_CE_DATA_LO_OFFSET 0x10C8
#define ECCR0_CE_DATA_HI_OFFSET 0x10CC
#define ECCR0_CE_DATA_PAR_OFFSET 0x10D0

#define ECCR0_UERR_STAT_OFFSET 0x10D4
#define ECCR0_UE_ADDR_LO_OFFSET 0x10D8
#define ECCR0_UE_ADDR_HI_OFFSET 0x10DC
#define ECCR0_UE_DATA_LO_OFFSET 0x10E0
#define ECCR0_UE_DATA_HI_OFFSET 0x10E4
#define ECCR0_UE_DATA_PAR_OFFSET 0x10E8

#define ECCR1_CERR_STAT_OFFSET 0x10F4
#define ECCR1_CE_ADDR_LO_OFFSET 0x10F8
#define ECCR1_CE_ADDR_HI_OFFSET 0x10FC
#define ECCR1_CE_DATA_LO_OFFSET 0x1100
#define ECCR1_CE_DATA_HI_OFFSET 0x1104
#define ECCR1_CE_DATA_PAR_OFFSET 0x1108

#define ECCR1_UERR_STAT_OFFSET 0x110C
#define ECCR1_UE_ADDR_LO_OFFSET 0x1110
#define ECCR1_UE_ADDR_HI_OFFSET 0x1114
#define ECCR1_UE_DATA_LO_OFFSET 0x1118
#define ECCR1_UE_DATA_HI_OFFSET 0x111C
#define ECCR1_UE_DATA_PAR_OFFSET 0x1120

#define XDDR_NOC_REG_ADEC4_OFFSET 0x44
#define RANK_1_MASK GENMASK(11, 6)
#define LRANK_0_MASK GENMASK(17, 12)
#define LRANK_1_MASK GENMASK(23, 18)
#define MASK_24 GENMASK(29, 24)

#define XDDR_NOC_REG_ADEC5_OFFSET 0x48
#define XDDR_NOC_REG_ADEC6_OFFSET 0x4C
#define XDDR_NOC_REG_ADEC7_OFFSET 0x50
#define XDDR_NOC_REG_ADEC8_OFFSET 0x54
#define XDDR_NOC_REG_ADEC9_OFFSET 0x58
#define XDDR_NOC_REG_ADEC10_OFFSET 0x5C
#define XDDR_NOC_REG_ADEC11_OFFSET 0x60

#define MASK_0 GENMASK(5, 0)
#define GRP_0_MASK GENMASK(11, 6)
#define GRP_1_MASK GENMASK(17, 12)
#define CH_0_MASK GENMASK(23, 18)

#define XDDR_NOC_REG_ADEC12_OFFSET 0x71C
#define XDDR_NOC_REG_ADEC13_OFFSET 0x720

#define XDDR_NOC_REG_ADEC14_OFFSET 0x724
#define XDDR_NOC_ROW_MATCH_MASK GENMASK(17, 0)
#define XDDR_NOC_COL_MATCH_MASK GENMASK(27, 18)
#define XDDR_NOC_BANK_MATCH_MASK GENMASK(29, 28)
#define XDDR_NOC_GRP_MATCH_MASK GENMASK(31, 30)

#define XDDR_NOC_REG_ADEC15_OFFSET 0x728
#define XDDR_NOC_RANK_MATCH_MASK GENMASK(1, 0)
#define XDDR_NOC_LRANK_MATCH_MASK GENMASK(4, 2)
#define XDDR_NOC_CH_MATCH_MASK BIT(5)
#define XDDR_NOC_MOD_SEL_MASK BIT(6)
#define XDDR_NOC_MATCH_EN_MASK BIT(8)

#define ECCR_UE_CE_ADDR_HI_ROW_MASK GENMASK(7, 0)

#define XDDR_EDAC_NR_CSROWS 1
#define XDDR_EDAC_NR_CHANS 1

#define XDDR_BUS_WIDTH_64 0
#define XDDR_BUS_WIDTH_32 1
#define XDDR_BUS_WIDTH_16 2

#define XDDR_MAX_ROW_CNT 18
#define XDDR_MAX_COL_CNT 10
#define XDDR_MAX_RANK_CNT 2
#define XDDR_MAX_LRANK_CNT 3
#define XDDR_MAX_BANK_CNT 2
#define XDDR_MAX_GRP_CNT 2

/*
 * Config and system registers are usually locked. This is the
 * code which unlocks them in order to accept writes. See
 *
 * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register
 */
#define PCSR_UNLOCK_VAL 0xF9E8D7C6
#define PCSR_LOCK_VAL 1

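/*
 * Writes to the protected DDRMC/NOC registers throughout this driver
 * follow the same unlock -> write -> re-lock sequence, e.g.:
 *
 *	writel(PCSR_UNLOCK_VAL, base + XDDR_PCSR_OFFSET);
 *	...program the protected registers...
 *	writel(PCSR_LOCK_VAL, base + XDDR_PCSR_OFFSET);
 */
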
#define XDDR_ERR_TYPE_CE 0
#define XDDR_ERR_TYPE_UE 1

#define XILINX_DRAM_SIZE_4G 0
#define XILINX_DRAM_SIZE_6G 1
#define XILINX_DRAM_SIZE_8G 2
#define XILINX_DRAM_SIZE_12G 3
#define XILINX_DRAM_SIZE_16G 4
#define XILINX_DRAM_SIZE_32G 5
#define NUM_UE_BITPOS 2

/**
 * union ecc_error_info - ECC error log information.
 * @burstpos: Burst position.
 * @lrank: Logical Rank number.
 * @rank: Rank number.
 * @group: Group number.
 * @bank: Bank number.
 * @col: Column number.
 * @row: Row number.
 * @rowhi: Row number higher bits.
 * @i: ECC error info.
 */
union ecc_error_info {
	struct {
		u32 burstpos:3;
		u32 lrank:3;
		u32 rank:2;
		u32 group:2;
		u32 bank:2;
		u32 col:10;
		u32 row:10;
		u32 rowhi;
	};
	u64 i;
} __packed;

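/*
 * Bit-position layout of the NOC address-decode (ADEC) registers: each
 * register packs five 6-bit fields, one physical-address bit position per
 * row/column bit (see setup_row_address_map() and
 * setup_column_address_map()).
 */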
union edac_info {
	struct {
		u32 row0:6;
		u32 row1:6;
		u32 row2:6;
		u32 row3:6;
		u32 row4:6;
		u32 reserved:2;
	};
	struct {
		u32 col1:6;
		u32 col2:6;
		u32 col3:6;
		u32 col4:6;
		u32 col5:6;
		u32 reservedcol:2;
	};
	u32 i;
} __packed;

/**
 * struct ecc_status - ECC status information to report.
 * @ceinfo: Correctable error log information.
 * @ueinfo: Uncorrectable error log information.
 * @channel: Channel number.
 * @error_type: Error type information.
 */
struct ecc_status {
	union ecc_error_info ceinfo[2];
	union ecc_error_info ueinfo[2];
	u8 channel;
	u8 error_type;
};

/**
 * struct edac_priv - DDR memory controller private instance data.
 * @ddrmc_baseaddr: Base address of the DDR controller.
 * @ddrmc_noc_baseaddr: Base address of the DDRMC NOC.
 * @message: Buffer for framing the event specific info.
 * @mc_id: Memory controller ID.
 * @ce_cnt: Correctable error count.
 * @ue_cnt: UnCorrectable error count.
 * @stat: ECC status information.
 * @lrank_bit: Bit shifts for lrank bit.
 * @rank_bit: Bit shifts for rank bit.
 * @row_bit: Bit shifts for row bit.
 * @col_bit: Bit shifts for column bit.
 * @bank_bit: Bit shifts for bank bit.
 * @grp_bit: Bit shifts for group bit.
 * @ch_bit: Bit shifts for channel bit.
 * @err_inject_addr: Data poison address.
 * @debugfs: Debugfs handle.
 */
struct edac_priv {
	void __iomem *ddrmc_baseaddr;
	void __iomem *ddrmc_noc_baseaddr;
	char message[XDDR_EDAC_MSG_SIZE];
	u32 mc_id;
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_status stat;
	u32 lrank_bit[3];
	u32 rank_bit[2];
	u32 row_bit[18];
	u32 col_bit[10];
	u32 bank_bit[2];
	u32 grp_bit[2];
	u32 ch_bit;
#ifdef CONFIG_EDAC_DEBUG
	u64 err_inject_addr;
	struct dentry *debugfs;
#endif
};

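/*
 * get_ce_error_info() and get_ue_error_info() below snapshot the error
 * address registers of both per-channel ECC register sets (ECCR0/ECCR1)
 * into priv->stat and dump the corresponding error data registers via
 * edac_dbg().
 */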
static void get_ce_error_info(struct edac_priv *priv)
{
	void __iomem *ddrmc_base;
	struct ecc_status *p;
	u32 regval;
	u64 reghi;

	ddrmc_base = priv->ddrmc_baseaddr;
	p = &priv->stat;

	p->error_type = XDDR_ERR_TYPE_CE;
	regval = readl(ddrmc_base + ECCR0_CE_ADDR_LO_OFFSET);
	reghi = readl(ddrmc_base + ECCR0_CE_ADDR_HI_OFFSET) & ECCR_UE_CE_ADDR_HI_ROW_MASK;
	p->ceinfo[0].i = regval | reghi << 32;
	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
		 readl(ddrmc_base + ECCR0_CE_DATA_LO_OFFSET),
		 readl(ddrmc_base + ECCR0_CE_DATA_HI_OFFSET),
		 readl(ddrmc_base + ECCR0_CE_DATA_PAR_OFFSET));

	regval = readl(ddrmc_base + ECCR1_CE_ADDR_LO_OFFSET);
	reghi = readl(ddrmc_base + ECCR1_CE_ADDR_HI_OFFSET) & ECCR_UE_CE_ADDR_HI_ROW_MASK;
	p->ceinfo[1].i = regval | reghi << 32;
	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
		 readl(ddrmc_base + ECCR1_CE_DATA_LO_OFFSET),
		 readl(ddrmc_base + ECCR1_CE_DATA_HI_OFFSET),
		 readl(ddrmc_base + ECCR1_CE_DATA_PAR_OFFSET));
}

static void get_ue_error_info(struct edac_priv *priv)
{
	void __iomem *ddrmc_base;
	struct ecc_status *p;
	u32 regval;
	u64 reghi;

	ddrmc_base = priv->ddrmc_baseaddr;
	p = &priv->stat;

	p->error_type = XDDR_ERR_TYPE_UE;
	regval = readl(ddrmc_base + ECCR0_UE_ADDR_LO_OFFSET);
	reghi = readl(ddrmc_base + ECCR0_UE_ADDR_HI_OFFSET) & ECCR_UE_CE_ADDR_HI_ROW_MASK;
	p->ueinfo[0].i = regval | reghi << 32;
	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
		 readl(ddrmc_base + ECCR0_UE_DATA_LO_OFFSET),
		 readl(ddrmc_base + ECCR0_UE_DATA_HI_OFFSET),
		 readl(ddrmc_base + ECCR0_UE_DATA_PAR_OFFSET));

	regval = readl(ddrmc_base + ECCR1_UE_ADDR_LO_OFFSET);
	reghi = readl(ddrmc_base + ECCR1_UE_ADDR_HI_OFFSET) & ECCR_UE_CE_ADDR_HI_ROW_MASK;
	p->ueinfo[1].i = regval | reghi << 32;
	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
		 readl(ddrmc_base + ECCR1_UE_DATA_LO_OFFSET),
		 readl(ddrmc_base + ECCR1_UE_DATA_HI_OFFSET),
		 readl(ddrmc_base + ECCR1_UE_DATA_PAR_OFFSET));
}

static bool get_error_info(struct edac_priv *priv)
{
	u32 eccr0_ceval, eccr1_ceval, eccr0_ueval, eccr1_ueval;
	void __iomem *ddrmc_base;
	struct ecc_status *p;

	ddrmc_base = priv->ddrmc_baseaddr;
	p = &priv->stat;

	eccr0_ceval = readl(ddrmc_base + ECCR0_CERR_STAT_OFFSET);
	eccr1_ceval = readl(ddrmc_base + ECCR1_CERR_STAT_OFFSET);
	eccr0_ueval = readl(ddrmc_base + ECCR0_UERR_STAT_OFFSET);
	eccr1_ueval = readl(ddrmc_base + ECCR1_UERR_STAT_OFFSET);

	if (!eccr0_ceval && !eccr1_ceval && !eccr0_ueval && !eccr1_ueval)
		return true;

	if (!eccr0_ceval)
		p->channel = 1;
	else
		p->channel = 0;

	if (eccr0_ceval || eccr1_ceval)
		get_ce_error_info(priv);

	if (eccr0_ueval || eccr1_ueval) {
		if (!eccr0_ueval)
			p->channel = 1;
		else
			p->channel = 0;
		get_ue_error_info(priv);
	}

	/* Unlock the PCSR registers */
	writel(PCSR_UNLOCK_VAL, ddrmc_base + XDDR_PCSR_OFFSET);

	writel(0, ddrmc_base + ECCR0_CERR_STAT_OFFSET);
	writel(0, ddrmc_base + ECCR1_CERR_STAT_OFFSET);
	writel(0, ddrmc_base + ECCR0_UERR_STAT_OFFSET);
	writel(0, ddrmc_base + ECCR1_UERR_STAT_OFFSET);

	/* Lock the PCSR registers */
	writel(PCSR_LOCK_VAL, ddrmc_base + XDDR_PCSR_OFFSET);

	return false;
}

/**
 * convert_to_physical - Convert to physical address.
 * @priv: DDR memory controller private instance data.
 * @pinf: ECC error info structure.
 *
 * Return: Physical address of the DDR memory.
 */
static unsigned long convert_to_physical(struct edac_priv *priv, union ecc_error_info pinf)
{
	unsigned long err_addr = 0;
	u32 index;
	u32 row;

	row = pinf.rowhi << 10 | pinf.row;
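
	/*
	 * Each *_bit[] entry (filled in from the ADEC registers by
	 * setup_address_map()) holds the physical address bit position of
	 * the corresponding row/column/bank/group/rank bit, so the loops
	 * below scatter the decoded fields back into a linear address.
	 */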
	for (index = 0; index < XDDR_MAX_ROW_CNT; index++) {
		err_addr |= (row & BIT(0)) << priv->row_bit[index];
		row >>= 1;
	}

	for (index = 0; index < XDDR_MAX_COL_CNT; index++) {
		err_addr |= (pinf.col & BIT(0)) << priv->col_bit[index];
		pinf.col >>= 1;
	}

	for (index = 0; index < XDDR_MAX_BANK_CNT; index++) {
		err_addr |= (pinf.bank & BIT(0)) << priv->bank_bit[index];
		pinf.bank >>= 1;
	}

	for (index = 0; index < XDDR_MAX_GRP_CNT; index++) {
		err_addr |= (pinf.group & BIT(0)) << priv->grp_bit[index];
		pinf.group >>= 1;
	}

	for (index = 0; index < XDDR_MAX_RANK_CNT; index++) {
		err_addr |= (pinf.rank & BIT(0)) << priv->rank_bit[index];
		pinf.rank >>= 1;
	}

	for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) {
		err_addr |= (pinf.lrank & BIT(0)) << priv->lrank_bit[index];
		pinf.lrank >>= 1;
	}

	err_addr |= (priv->stat.channel & BIT(0)) << priv->ch_bit;

	return err_addr;
}

/**
 * handle_error - Handle Correctable and Uncorrectable errors.
 * @mci: EDAC memory controller instance.
 * @stat: ECC status structure.
 *
 * Handles ECC correctable and uncorrectable errors.
 */
static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat)
{
	struct edac_priv *priv = mci->pvt_info;
	union ecc_error_info pinf;

	if (stat->error_type == XDDR_ERR_TYPE_CE) {
		priv->ce_cnt++;
		pinf = stat->ceinfo[stat->channel];
		snprintf(priv->message, XDDR_EDAC_MSG_SIZE,
			 "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n",
			 "CE", priv->mc_id,
			 convert_to_physical(priv, pinf), pinf.burstpos);

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     1, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	if (stat->error_type == XDDR_ERR_TYPE_UE) {
		priv->ue_cnt++;
		pinf = stat->ueinfo[stat->channel];
		snprintf(priv->message, XDDR_EDAC_MSG_SIZE,
			 "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n",
			 "UE", priv->mc_id,
			 convert_to_physical(priv, pinf), pinf.burstpos);

		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     1, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	memset(stat, 0, sizeof(*stat));
}

/**
 * err_callback - Handle Correctable and Uncorrectable errors.
 * @payload: payload data.
 * @data: mci controller data.
 *
 * Handles ECC correctable and uncorrectable errors.
 */
static void err_callback(const u32 *payload, void *data)
{
	struct mem_ctl_info *mci = (struct mem_ctl_info *)data;
	struct edac_priv *priv;
	struct ecc_status *p;
	int regval;

	priv = mci->pvt_info;
	p = &priv->stat;
	regval = readl(priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);

	if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_CR)
		p->error_type = XDDR_ERR_TYPE_CE;
	if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_NCR)
		p->error_type = XDDR_ERR_TYPE_UE;

	if (get_error_info(priv))
		return;

	handle_error(mci, &priv->stat);

	/* Unlock the PCSR registers */
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);

	/* Clear the ISR */
	writel(regval, priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);

	/* Lock the PCSR registers */
	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
}

/**
 * get_dwidth - Return the controller memory width.
 * @base: DDR memory controller base address.
 *
 * Get the EDAC device type width appropriate for the controller
 * configuration.
 *
 * Return: a device type width enumeration.
 */
static enum dev_type get_dwidth(const void __iomem *base)
{
	enum dev_type dt;
	u32 regval;
	u32 width;

	regval = readl(base + XDDR_REG_CONFIG0_OFFSET);
	width = FIELD_GET(XDDR_REG_CONFIG0_BUS_WIDTH_MASK, regval);

	switch (width) {
	case XDDR_BUS_WIDTH_16:
		dt = DEV_X2;
		break;
	case XDDR_BUS_WIDTH_32:
		dt = DEV_X4;
		break;
	case XDDR_BUS_WIDTH_64:
		dt = DEV_X8;
		break;
	default:
		dt = DEV_UNKNOWN;
	}

	return dt;
}

/**
 * get_ecc_state - Return the controller ECC enable/disable status.
 * @base: DDR memory controller base address.
 *
 * Get the ECC enable/disable status for the controller.
 *
 * Return: an ECC status boolean, i.e. true/false - enabled/disabled.
 */
static bool get_ecc_state(void __iomem *base)
{
	enum dev_type dt;
	u32 ecctype;

	dt = get_dwidth(base);
	if (dt == DEV_UNKNOWN)
		return false;

	ecctype = readl(base + XDDR_REG_PINOUT_OFFSET);
	ecctype &= XDDR_REG_PINOUT_ECC_EN_MASK;

	return !!ecctype;
}

/**
 * get_memsize - Get the size of the attached memory device.
 * @priv: DDR memory controller private instance data.
 *
 * Return: the memory size in bytes.
 */
static u64 get_memsize(struct edac_priv *priv)
{
	u32 regval;
	u64 size;

	regval = readl(priv->ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
	regval = FIELD_GET(XDDR_REG_CONFIG0_SIZE_MASK, regval);

	switch (regval) {
	case XILINX_DRAM_SIZE_4G:
		size = 4U; break;
	case XILINX_DRAM_SIZE_6G:
		size = 6U; break;
	case XILINX_DRAM_SIZE_8G:
		size = 8U; break;
	case XILINX_DRAM_SIZE_12G:
		size = 12U; break;
	case XILINX_DRAM_SIZE_16G:
		size = 16U; break;
	case XILINX_DRAM_SIZE_32G:
		size = 32U; break;
	/* Invalid configuration */
	default:
		size = 0; break;
	}

	size *= SZ_1G;
	return size;
}

/**
 * init_csrows - Initialize the csrow data.
 * @mci: EDAC memory controller instance.
 *
 * Initialize the chip select rows associated with the EDAC memory
 * controller instance.
 */
static void init_csrows(struct mem_ctl_info *mci)
{
	struct edac_priv *priv = mci->pvt_info;
	struct csrow_info *csi;
	struct dimm_info *dimm;
	unsigned long size;
	u32 row;
	int ch;

	size = get_memsize(priv);
	for (row = 0; row < mci->nr_csrows; row++) {
		csi = mci->csrows[row];
		for (ch = 0; ch < csi->nr_channels; ch++) {
			dimm = csi->channels[ch]->dimm;
			dimm->edac_mode = EDAC_SECDED;
			dimm->mtype = MEM_DDR4;
			dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
			dimm->grain = XDDR_EDAC_ERR_GRAIN;
			dimm->dtype = get_dwidth(priv->ddrmc_baseaddr);
		}
	}
}

/**
 * mc_init - Initialize one driver instance.
 * @mci: EDAC memory controller instance.
 * @pdev: platform device.
 *
 * Perform initialization of the EDAC memory controller instance and
 * related driver-private data associated with the memory controller the
 * instance is bound to.
 */
static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
{
	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);

	/* Initialize controller capabilities and configuration */
	mci->mtype_cap = MEM_FLAG_DDR4;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->scrub_cap = SCRUB_HW_SRC;
	mci->scrub_mode = SCRUB_NONE;

	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->ctl_name = "xlnx_ddr_controller";
	mci->dev_name = dev_name(&pdev->dev);
	mci->mod_name = "xlnx_edac";

	edac_op_state = EDAC_OPSTATE_INT;

	init_csrows(mci);
}

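/*
 * Enable both correctable and uncorrectable error interrupts on IRQ0 and
 * uncorrectable errors only on IRQ1.
 */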
static void enable_intr(struct edac_priv *priv)
{
	/* Unlock the PCSR registers */
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);

	/* Enable UE and CE Interrupts to support the interrupt case */
	writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
	       priv->ddrmc_baseaddr + XDDR_IRQ_EN_OFFSET);

	writel(XDDR_IRQ_UE_MASK,
	       priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET);

	/* Lock the PCSR registers */
	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}

static void disable_intr(struct edac_priv *priv)
{
	/* Unlock the PCSR registers */
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);

	/* Disable UE/CE Interrupts */
	writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
	       priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET);

	/* Lock the PCSR registers */
	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

#ifdef CONFIG_EDAC_DEBUG
/**
 * poison_setup - Update poison registers.
 * @priv: DDR memory controller private instance data.
 *
 * Update the poison (ADEC match) registers as per the DDR address mapping
 * for the address at which the fault is to be injected.
 *
 * Return: none.
 */
static void poison_setup(struct edac_priv *priv)
{
	u32 col = 0, row = 0, bank = 0, grp = 0, rank = 0, lrank = 0, ch = 0;
	u32 index, regval;

	for (index = 0; index < XDDR_MAX_ROW_CNT; index++) {
		row |= (((priv->err_inject_addr >> priv->row_bit[index]) &
			 BIT(0)) << index);
	}

	for (index = 0; index < XDDR_MAX_COL_CNT; index++) {
		col |= (((priv->err_inject_addr >> priv->col_bit[index]) &
			 BIT(0)) << index);
	}

	for (index = 0; index < XDDR_MAX_BANK_CNT; index++) {
		bank |= (((priv->err_inject_addr >> priv->bank_bit[index]) &
			  BIT(0)) << index);
	}

	for (index = 0; index < XDDR_MAX_GRP_CNT; index++) {
		grp |= (((priv->err_inject_addr >> priv->grp_bit[index]) &
			 BIT(0)) << index);
	}

	for (index = 0; index < XDDR_MAX_RANK_CNT; index++) {
		rank |= (((priv->err_inject_addr >> priv->rank_bit[index]) &
			  BIT(0)) << index);
	}

	for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) {
		lrank |= (((priv->err_inject_addr >> priv->lrank_bit[index]) &
			   BIT(0)) << index);
	}

	ch = (priv->err_inject_addr >> priv->ch_bit) & BIT(0);
	if (ch)
		writel(0xFF, priv->ddrmc_baseaddr + ECCW1_FLIP_CTRL);
	else
		writel(0xFF, priv->ddrmc_baseaddr + ECCW0_FLIP_CTRL);

	writel(0, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC12_OFFSET);
	writel(0, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC13_OFFSET);

	regval = row & XDDR_NOC_ROW_MATCH_MASK;
	regval |= FIELD_PREP(XDDR_NOC_COL_MATCH_MASK, col);
	regval |= FIELD_PREP(XDDR_NOC_BANK_MATCH_MASK, bank);
	regval |= FIELD_PREP(XDDR_NOC_GRP_MATCH_MASK, grp);
	writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC14_OFFSET);

	regval = rank & XDDR_NOC_RANK_MATCH_MASK;
	regval |= FIELD_PREP(XDDR_NOC_LRANK_MATCH_MASK, lrank);
	regval |= FIELD_PREP(XDDR_NOC_CH_MATCH_MASK, ch);
	regval |= (XDDR_NOC_MOD_SEL_MASK | XDDR_NOC_MATCH_EN_MASK);
	writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET);
}

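/*
 * Program the ECC word 0/1 FLIP registers with the bit to corrupt: bit
 * positions 0..30 map into FLIP0, higher positions into FLIP1 (see
 * ECCW0_FLIP0_BITS).
 */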
static void xddr_inject_data_ce_store(struct mem_ctl_info *mci, u8 ce_bitpos)
{
	u32 ecc0_flip0, ecc1_flip0, ecc0_flip1, ecc1_flip1;
	struct edac_priv *priv = mci->pvt_info;

	if (ce_bitpos < ECCW0_FLIP0_BITS) {
		ecc0_flip0 = BIT(ce_bitpos);
		ecc1_flip0 = BIT(ce_bitpos);
		ecc0_flip1 = 0;
		ecc1_flip1 = 0;
	} else {
		ce_bitpos = ce_bitpos - ECCW0_FLIP0_BITS;
		ecc0_flip1 = BIT(ce_bitpos);
		ecc1_flip1 = BIT(ce_bitpos);
		ecc0_flip0 = 0;
		ecc1_flip0 = 0;
	}

	writel(ecc0_flip0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
	writel(ecc1_flip0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
	writel(ecc0_flip1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
	writel(ecc1_flip1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
}

/*
 * To inject a correctable error, the following steps are needed:
 *
 * - Write the correctable error bit position value:
 *	echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ce
 *
 * poison_setup() derives the row, column, bank, group and rank and
 * writes to the ADEC registers based on the address given by the user.
 *
 * The ADEC12 and ADEC13 are mask registers; write 0 to make sure default
 * configuration is there and no addresses are masked.
 *
 * The row, column, bank, group and rank registers are written to the
 * match ADEC bit to generate errors at the particular address. ADEC14
 * and ADEC15 have the match bits.
 *
 * xddr_inject_data_ce_store() updates the ECC FLIP registers with the
 * bits to be corrupted based on the bit position given by the user.
 *
 * The error is injected upon a subsequent read of the address.
 */
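/*
 * For example, assuming a (hypothetical) target address of 0x1000:
 *
 *	echo 0x1000 > /sys/kernel/debug/edac/<controller instance>/address
 *	echo 2 > /sys/kernel/debug/edac/<controller instance>/inject_ce
 *
 * and then read the poisoned address back to trigger the correctable error.
 */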
static ssize_t inject_data_ce_store(struct file *file, const char __user *data,
				    size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	struct mem_ctl_info *mci = to_mci(dev);
	struct edac_priv *priv = mci->pvt_info;
	u8 ce_bitpos;
	int ret;

	ret = kstrtou8_from_user(data, count, 0, &ce_bitpos);
	if (ret)
		return ret;

	/* Unlock the PCSR registers */
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);

	poison_setup(priv);

	xddr_inject_data_ce_store(mci, ce_bitpos);
	ret = count;

	/* Lock the PCSR registers */
	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
	writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);

	return ret;
}

static const struct file_operations xddr_inject_ce_fops = {
	.open = simple_open,
	.write = inject_data_ce_store,
	.llseek = generic_file_llseek,
};

static void xddr_inject_data_ue_store(struct mem_ctl_info *mci, u32 val0, u32 val1)
{
	struct edac_priv *priv = mci->pvt_info;

	writel(val0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
	writel(val0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
	writel(val1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
	writel(val1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
}

/*
 * To inject an uncorrectable error, the following steps are needed:
 *	echo <bit_pos val>,<bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ue
 *
 * poison_setup() derives the row, column, bank, group and rank and
 * writes to the ADEC registers based on the address given by the user.
 *
 * The ADEC12 and ADEC13 are mask registers; write 0 so that none of the
 * addresses are masked. The row, column, bank, group and rank registers
 * are written to the match ADEC bit to generate errors at the
 * particular address. ADEC14 and ADEC15 have the match bits.
 *
 * xddr_inject_data_ue_store() updates the ECC FLIP registers with the
 * bits to be corrupted based on the bit positions given by the user. For
 * uncorrectable errors, 2-bit errors are injected.
 *
 * The errors are injected upon a subsequent read of the address.
 */
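/*
 * For example, assuming a (hypothetical) target address of 0x1000 and bit
 * positions 2 and 3:
 *
 *	echo 0x1000 > /sys/kernel/debug/edac/<controller instance>/address
 *	echo 2,3 > /sys/kernel/debug/edac/<controller instance>/inject_ue
 *
 * and then read the poisoned address back to trigger the uncorrectable error.
 */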
static ssize_t inject_data_ue_store(struct file *file, const char __user *data,
				    size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	struct mem_ctl_info *mci = to_mci(dev);
	struct edac_priv *priv = mci->pvt_info;
	char buf[6], *pbuf, *token[2];
	u32 val0 = 0, val1 = 0;
	u8 len, ue0, ue1;
	int i, ret;

	len = min_t(size_t, count, sizeof(buf) - 1);
	if (copy_from_user(buf, data, len))
		return -EFAULT;
	buf[len] = '\0';

	pbuf = &buf[0];
	for (i = 0; i < NUM_UE_BITPOS; i++)
		token[i] = strsep(&pbuf, ",");

	if (!token[0] || !token[1])
		return -EFAULT;

	ret = kstrtou8(token[0], 0, &ue0);
	if (ret)
		return ret;

	ret = kstrtou8(token[1], 0, &ue1);
	if (ret)
		return ret;

	if (ue0 < ECCW0_FLIP0_BITS) {
		val0 = BIT(ue0);
	} else {
		ue0 = ue0 - ECCW0_FLIP0_BITS;
		val1 = BIT(ue0);
	}

	if (ue1 < ECCW0_FLIP0_BITS) {
		val0 |= BIT(ue1);
	} else {
		ue1 = ue1 - ECCW0_FLIP0_BITS;
		val1 |= BIT(ue1);
	}

	/* Unlock the PCSR registers */
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);

	poison_setup(priv);

	xddr_inject_data_ue_store(mci, val0, val1);

	/* Lock the PCSR registers */
	writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);

	return count;
}

static const struct file_operations xddr_inject_ue_fops = {
	.open = simple_open,
	.write = inject_data_ue_store,
	.llseek = generic_file_llseek,
};

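/*
 * Create the inject_ce/inject_ue debugfs nodes together with the "address"
 * node that backs priv->err_inject_addr consumed by poison_setup().
 */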
static void create_debugfs_attributes(struct mem_ctl_info *mci)
{
	struct edac_priv *priv = mci->pvt_info;

	priv->debugfs = edac_debugfs_create_dir(mci->dev_name);
	if (!priv->debugfs)
		return;

	if (!edac_debugfs_create_file("inject_ce", 0200, priv->debugfs,
				      &mci->dev, &xddr_inject_ce_fops)) {
		debugfs_remove_recursive(priv->debugfs);
		return;
	}

	if (!edac_debugfs_create_file("inject_ue", 0200, priv->debugfs,
				      &mci->dev, &xddr_inject_ue_fops)) {
		debugfs_remove_recursive(priv->debugfs);
		return;
	}

	debugfs_create_x64("address", 0600, priv->debugfs,
			   &priv->err_inject_addr);
	mci->debugfs = priv->debugfs;
}

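/* Unpack five 6-bit row bit positions from a single ADEC register. */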
static inline void process_bit(struct edac_priv *priv, unsigned int start, u32 regval)
{
	union edac_info rows;

	rows.i = regval;
	priv->row_bit[start] = rows.row0;
	priv->row_bit[start + 1] = rows.row1;
	priv->row_bit[start + 2] = rows.row2;
	priv->row_bit[start + 3] = rows.row3;
	priv->row_bit[start + 4] = rows.row4;
}

static void setup_row_address_map(struct edac_priv *priv)
{
	u32 regval;
	union edac_info rows;

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC5_OFFSET);
	process_bit(priv, 0, regval);

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC6_OFFSET);
	process_bit(priv, 5, regval);

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC7_OFFSET);
	process_bit(priv, 10, regval);

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET);
	rows.i = regval;
	priv->row_bit[15] = rows.row0;
	priv->row_bit[16] = rows.row1;
	priv->row_bit[17] = rows.row2;
}

static void setup_column_address_map(struct edac_priv *priv)
{
	u32 regval;
	union edac_info cols;

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET);
	priv->col_bit[0] = FIELD_GET(MASK_24, regval);

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC9_OFFSET);
	cols.i = regval;
	priv->col_bit[1] = cols.col1;
	priv->col_bit[2] = cols.col2;
	priv->col_bit[3] = cols.col3;
	priv->col_bit[4] = cols.col4;
	priv->col_bit[5] = cols.col5;

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET);
	cols.i = regval;
	priv->col_bit[6] = cols.col1;
	priv->col_bit[7] = cols.col2;
	priv->col_bit[8] = cols.col3;
	priv->col_bit[9] = cols.col4;
}

static void setup_bank_grp_ch_address_map(struct edac_priv *priv)
{
	u32 regval;

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET);
	priv->bank_bit[0] = FIELD_GET(MASK_24, regval);

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC11_OFFSET);
	priv->bank_bit[1] = (regval & MASK_0);
	priv->grp_bit[0] = FIELD_GET(GRP_0_MASK, regval);
	priv->grp_bit[1] = FIELD_GET(GRP_1_MASK, regval);
	priv->ch_bit = FIELD_GET(CH_0_MASK, regval);
}

static void setup_rank_lrank_address_map(struct edac_priv *priv)
{
	u32 regval;

	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC4_OFFSET);
	priv->rank_bit[0] = (regval & MASK_0);
	priv->rank_bit[1] = FIELD_GET(RANK_1_MASK, regval);
	priv->lrank_bit[0] = FIELD_GET(LRANK_0_MASK, regval);
	priv->lrank_bit[1] = FIELD_GET(LRANK_1_MASK, regval);
	priv->lrank_bit[2] = FIELD_GET(MASK_24, regval);
}

/**
 * setup_address_map - Set Address Map by querying ADDRMAP registers.
 * @priv: DDR memory controller private instance data.
 *
 * Set Address Map by querying ADDRMAP registers.
 *
 * Return: none.
 */
static void setup_address_map(struct edac_priv *priv)
{
	setup_row_address_map(priv);
	setup_column_address_map(priv);
	setup_bank_grp_ch_address_map(priv);
	setup_rank_lrank_address_map(priv);
}
#endif /* CONFIG_EDAC_DEBUG */

static const struct of_device_id xlnx_edac_match[] = {
	{ .compatible = "xlnx,versal-ddrmc", },
	{
		/* end of table */
	}
};
MODULE_DEVICE_TABLE(of, xlnx_edac_match);

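/*
 * Derive a stable controller index by counting how many other DDRMC nodes
 * in the device tree have a lower base address than this one.
 */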
static u32 emif_get_id(struct device_node *node)
{
	u32 addr, my_addr, my_id = 0;
	struct device_node *np;
	const __be32 *addrp;

	addrp = of_get_address(node, 0, NULL, NULL);
	my_addr = (u32)of_translate_address(node, addrp);

	for_each_matching_node(np, xlnx_edac_match) {
		if (np == node)
			continue;

		addrp = of_get_address(np, 0, NULL, NULL);
		addr = (u32)of_translate_address(np, addrp);

		edac_printk(KERN_INFO, EDAC_MC,
			    "addr=%x, my_addr=%x\n",
			    addr, my_addr);

		if (addr < my_addr)
			my_id++;
	}

	return my_id;
}

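/**
 * mc_probe - Check controller and bind driver.
 * @pdev: platform device.
 *
 * Probe a specific controller instance for binding with the driver.
 *
 * Return: 0 if the controller instance was successfully bound to the
 * driver; otherwise, < 0 on error.
 */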
static int mc_probe(struct platform_device *pdev)
{
	void __iomem *ddrmc_baseaddr, *ddrmc_noc_baseaddr;
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	u8 num_chans, num_csrows;
	struct edac_priv *priv;
	u32 edac_mc_id, regval;
	int rc;

	ddrmc_baseaddr = devm_platform_ioremap_resource_byname(pdev, "base");
	if (IS_ERR(ddrmc_baseaddr))
		return PTR_ERR(ddrmc_baseaddr);

	ddrmc_noc_baseaddr = devm_platform_ioremap_resource_byname(pdev, "noc");
	if (IS_ERR(ddrmc_noc_baseaddr))
		return PTR_ERR(ddrmc_noc_baseaddr);

	if (!get_ecc_state(ddrmc_baseaddr))
		return -ENXIO;

	/* Allocate ID number for the EMIF controller */
	edac_mc_id = emif_get_id(pdev->dev.of_node);

	regval = readl(ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
	num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
	num_chans++;

	num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
	num_csrows *= 2;
	if (!num_csrows)
		num_csrows = 1;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = num_csrows;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = num_chans;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(edac_mc_id, ARRAY_SIZE(layers), layers,
			    sizeof(struct edac_priv));
	if (!mci) {
		edac_printk(KERN_ERR, EDAC_MC,
			    "Failed memory allocation for mc instance\n");
		return -ENOMEM;
	}

	priv = mci->pvt_info;
	priv->ddrmc_baseaddr = ddrmc_baseaddr;
	priv->ddrmc_noc_baseaddr = ddrmc_noc_baseaddr;
	priv->ce_cnt = 0;
	priv->ue_cnt = 0;
	priv->mc_id = edac_mc_id;

	mc_init(mci, pdev);

	rc = edac_mc_add_mc(mci);
	if (rc) {
		edac_printk(KERN_ERR, EDAC_MC,
			    "Failed to register with EDAC core\n");
		goto free_edac_mc;
	}

	rc = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
				 XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR,
				 false, err_callback, mci);
	if (rc) {
		if (rc == -EACCES)
			rc = -EPROBE_DEFER;

		goto del_mc;
	}

#ifdef CONFIG_EDAC_DEBUG
	create_debugfs_attributes(mci);
	setup_address_map(priv);
#endif
	enable_intr(priv);

	return rc;

del_mc:
	edac_mc_del_mc(&pdev->dev);
free_edac_mc:
	edac_mc_free(mci);

	return rc;
}

static void mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
	struct edac_priv *priv = mci->pvt_info;

	disable_intr(priv);

#ifdef CONFIG_EDAC_DEBUG
	debugfs_remove_recursive(priv->debugfs);
#endif

	xlnx_unregister_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
			      XPM_EVENT_ERROR_MASK_DDRMC_CR |
			      XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
}

static struct platform_driver xilinx_ddr_edac_mc_driver = {
	.driver = {
		.name = "xilinx-ddrmc-edac",
		.of_match_table = xlnx_edac_match,
	},
	.probe = mc_probe,
	.remove_new = mc_remove,
};

module_platform_driver(xilinx_ddr_edac_mc_driver);

MODULE_AUTHOR("AMD Inc");
MODULE_DESCRIPTION("Xilinx DDRMC ECC driver");
MODULE_LICENSE("GPL");