synopsys_edac.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Synopsys DDR ECC Driver
  4. * This driver is based on ppc4xx_edac.c drivers
  5. *
  6. * Copyright (C) 2012 - 2014 Xilinx, Inc.
  7. */
  8. #include <linux/edac.h>
  9. #include <linux/module.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/sizes.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/of.h>
  15. #include "edac_module.h"
  16. /* Number of cs_rows needed per memory controller */
  17. #define SYNPS_EDAC_NR_CSROWS 1
  18. /* Number of channels per memory controller */
  19. #define SYNPS_EDAC_NR_CHANS 1
  20. /* Granularity of reported error in bytes */
  21. #define SYNPS_EDAC_ERR_GRAIN 1
  22. #define SYNPS_EDAC_MSG_SIZE 256
  23. #define SYNPS_EDAC_MOD_STRING "synps_edac"
  24. #define SYNPS_EDAC_MOD_VER "1"
  25. /* Synopsys DDR memory controller registers that are relevant to ECC */
  26. #define CTRL_OFST 0x0
  27. #define T_ZQ_OFST 0xA4
  28. /* ECC control register */
  29. #define ECC_CTRL_OFST 0xC4
  30. /* ECC log register */
  31. #define CE_LOG_OFST 0xC8
  32. /* ECC address register */
  33. #define CE_ADDR_OFST 0xCC
  34. /* ECC data[31:0] register */
  35. #define CE_DATA_31_0_OFST 0xD0
  36. /* Uncorrectable error info registers */
  37. #define UE_LOG_OFST 0xDC
  38. #define UE_ADDR_OFST 0xE0
  39. #define UE_DATA_31_0_OFST 0xE4
  40. #define STAT_OFST 0xF0
  41. #define SCRUB_OFST 0xF4
  42. /* Control register bit field definitions */
  43. #define CTRL_BW_MASK 0xC
  44. #define CTRL_BW_SHIFT 2
  45. #define DDRCTL_WDTH_16 1
  46. #define DDRCTL_WDTH_32 0
  47. /* ZQ register bit field definitions */
  48. #define T_ZQ_DDRMODE_MASK 0x2
  49. /* ECC control register bit field definitions */
  50. #define ECC_CTRL_CLR_CE_ERR 0x2
  51. #define ECC_CTRL_CLR_UE_ERR 0x1
  52. /* ECC correctable/uncorrectable error log register definitions */
  53. #define LOG_VALID 0x1
  54. #define CE_LOG_BITPOS_MASK 0xFE
  55. #define CE_LOG_BITPOS_SHIFT 1
  56. /* ECC correctable/uncorrectable error address register definitions */
  57. #define ADDR_COL_MASK 0xFFF
  58. #define ADDR_ROW_MASK 0xFFFF000
  59. #define ADDR_ROW_SHIFT 12
  60. #define ADDR_BANK_MASK 0x70000000
  61. #define ADDR_BANK_SHIFT 28
  62. /* ECC statistic register definitions */
  63. #define STAT_UECNT_MASK 0xFF
  64. #define STAT_CECNT_MASK 0xFF00
  65. #define STAT_CECNT_SHIFT 8
  66. /* ECC scrub register definitions */
  67. #define SCRUB_MODE_MASK 0x7
  68. #define SCRUB_MODE_SECDED 0x4
  69. /* DDR ECC Quirks */
  70. #define DDR_ECC_INTR_SUPPORT BIT(0)
  71. #define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
  72. #define DDR_ECC_INTR_SELF_CLEAR BIT(2)
  73. /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
  74. /* ECC Configuration Registers */
  75. #define ECC_CFG0_OFST 0x70
  76. #define ECC_CFG1_OFST 0x74
  77. /* ECC Status Register */
  78. #define ECC_STAT_OFST 0x78
  79. /* ECC Clear Register */
  80. #define ECC_CLR_OFST 0x7C
  81. /* ECC Error count Register */
  82. #define ECC_ERRCNT_OFST 0x80
  83. /* ECC Corrected Error Address Register */
  84. #define ECC_CEADDR0_OFST 0x84
  85. #define ECC_CEADDR1_OFST 0x88
  86. /* ECC Syndrome Registers */
  87. #define ECC_CSYND0_OFST 0x8C
  88. #define ECC_CSYND1_OFST 0x90
  89. #define ECC_CSYND2_OFST 0x94
  90. /* ECC Bit Mask0 Address Register */
  91. #define ECC_BITMASK0_OFST 0x98
  92. #define ECC_BITMASK1_OFST 0x9C
  93. #define ECC_BITMASK2_OFST 0xA0
  94. /* ECC UnCorrected Error Address Register */
  95. #define ECC_UEADDR0_OFST 0xA4
  96. #define ECC_UEADDR1_OFST 0xA8
  97. /* ECC Syndrome Registers */
  98. #define ECC_UESYND0_OFST 0xAC
  99. #define ECC_UESYND1_OFST 0xB0
  100. #define ECC_UESYND2_OFST 0xB4
  101. /* ECC Poison Address Reg */
  102. #define ECC_POISON0_OFST 0xB8
  103. #define ECC_POISON1_OFST 0xBC
  104. #define ECC_ADDRMAP0_OFFSET 0x200
  105. /* Control register bitfield definitions */
  106. #define ECC_CTRL_BUSWIDTH_MASK 0x3000
  107. #define ECC_CTRL_BUSWIDTH_SHIFT 12
  108. #define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
  109. #define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
  110. /* DDR Control Register width definitions */
  111. #define DDRCTL_EWDTH_16 2
  112. #define DDRCTL_EWDTH_32 1
  113. #define DDRCTL_EWDTH_64 0
  114. /* ECC status register definitions */
  115. #define ECC_STAT_UECNT_MASK 0xF0000
  116. #define ECC_STAT_UECNT_SHIFT 16
  117. #define ECC_STAT_CECNT_MASK 0xF00
  118. #define ECC_STAT_CECNT_SHIFT 8
  119. #define ECC_STAT_BITNUM_MASK 0x7F
  120. /* ECC error count register definitions */
  121. #define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
  122. #define ECC_ERRCNT_UECNT_SHIFT 16
  123. #define ECC_ERRCNT_CECNT_MASK 0xFFFF
  124. /* DDR QOS Interrupt register definitions */
  125. #define DDR_QOS_IRQ_STAT_OFST 0x20200
  126. #define DDR_QOSUE_MASK 0x4
  127. #define DDR_QOSCE_MASK 0x2
  128. #define ECC_CE_UE_INTR_MASK 0x6
  129. #define DDR_QOS_IRQ_EN_OFST 0x20208
  130. #define DDR_QOS_IRQ_DB_OFST 0x2020C
  131. /* DDR QOS Interrupt register definitions */
  132. #define DDR_UE_MASK BIT(9)
  133. #define DDR_CE_MASK BIT(8)
  134. /* ECC Corrected Error Register Mask and Shifts*/
  135. #define ECC_CEADDR0_RW_MASK 0x3FFFF
  136. #define ECC_CEADDR0_RNK_MASK BIT(24)
  137. #define ECC_CEADDR1_BNKGRP_MASK 0x3000000
  138. #define ECC_CEADDR1_BNKNR_MASK 0x70000
  139. #define ECC_CEADDR1_BLKNR_MASK 0xFFF
  140. #define ECC_CEADDR1_BNKGRP_SHIFT 24
  141. #define ECC_CEADDR1_BNKNR_SHIFT 16
  142. /* ECC Poison register shifts */
  143. #define ECC_POISON0_RANK_SHIFT 24
  144. #define ECC_POISON0_RANK_MASK BIT(24)
  145. #define ECC_POISON0_COLUMN_SHIFT 0
  146. #define ECC_POISON0_COLUMN_MASK 0xFFF
  147. #define ECC_POISON1_BG_SHIFT 28
  148. #define ECC_POISON1_BG_MASK 0x30000000
  149. #define ECC_POISON1_BANKNR_SHIFT 24
  150. #define ECC_POISON1_BANKNR_MASK 0x7000000
  151. #define ECC_POISON1_ROW_SHIFT 0
  152. #define ECC_POISON1_ROW_MASK 0x3FFFF
  153. /* DDR Memory type defines */
  154. #define MEM_TYPE_DDR3 0x1
  155. #define MEM_TYPE_LPDDR3 0x8
  156. #define MEM_TYPE_DDR2 0x4
  157. #define MEM_TYPE_DDR4 0x10
  158. #define MEM_TYPE_LPDDR4 0x20
  159. /* DDRC Software control register */
  160. #define DDRC_SWCTL 0x320
  161. /* DDRC ECC CE & UE poison mask */
  162. #define ECC_CEPOISON_MASK 0x3
  163. #define ECC_UEPOISON_MASK 0x1
  164. /* DDRC Device config masks */
  165. #define DDRC_MSTR_CFG_MASK 0xC0000000
  166. #define DDRC_MSTR_CFG_SHIFT 30
  167. #define DDRC_MSTR_CFG_X4_MASK 0x0
  168. #define DDRC_MSTR_CFG_X8_MASK 0x1
  169. #define DDRC_MSTR_CFG_X16_MASK 0x2
  170. #define DDRC_MSTR_CFG_X32_MASK 0x3
  171. #define DDR_MAX_ROW_SHIFT 18
  172. #define DDR_MAX_COL_SHIFT 14
  173. #define DDR_MAX_BANK_SHIFT 3
  174. #define DDR_MAX_BANKGRP_SHIFT 2
  175. #define ROW_MAX_VAL_MASK 0xF
  176. #define COL_MAX_VAL_MASK 0xF
  177. #define BANK_MAX_VAL_MASK 0x1F
  178. #define BANKGRP_MAX_VAL_MASK 0x1F
  179. #define RANK_MAX_VAL_MASK 0x1F
  180. #define ROW_B0_BASE 6
  181. #define ROW_B1_BASE 7
  182. #define ROW_B2_BASE 8
  183. #define ROW_B3_BASE 9
  184. #define ROW_B4_BASE 10
  185. #define ROW_B5_BASE 11
  186. #define ROW_B6_BASE 12
  187. #define ROW_B7_BASE 13
  188. #define ROW_B8_BASE 14
  189. #define ROW_B9_BASE 15
  190. #define ROW_B10_BASE 16
  191. #define ROW_B11_BASE 17
  192. #define ROW_B12_BASE 18
  193. #define ROW_B13_BASE 19
  194. #define ROW_B14_BASE 20
  195. #define ROW_B15_BASE 21
  196. #define ROW_B16_BASE 22
  197. #define ROW_B17_BASE 23
  198. #define COL_B2_BASE 2
  199. #define COL_B3_BASE 3
  200. #define COL_B4_BASE 4
  201. #define COL_B5_BASE 5
  202. #define COL_B6_BASE 6
  203. #define COL_B7_BASE 7
  204. #define COL_B8_BASE 8
  205. #define COL_B9_BASE 9
  206. #define COL_B10_BASE 10
  207. #define COL_B11_BASE 11
  208. #define COL_B12_BASE 12
  209. #define COL_B13_BASE 13
  210. #define BANK_B0_BASE 2
  211. #define BANK_B1_BASE 3
  212. #define BANK_B2_BASE 4
  213. #define BANKGRP_B0_BASE 2
  214. #define BANKGRP_B1_BASE 3
  215. #define RANK_B0_BASE 6
/**
 * struct ecc_error_info - ECC error log information.
 * @row:	Row number.
 * @col:	Column number.
 * @bank:	Bank number.
 * @bitpos:	Bit position of the corrected bit (CE only).
 * @data:	Data (syndrome) causing the error.
 * @bankgrpnr:	Bank group number (ZynqMP/SYNPS layout only).
 * @blknr:	Block number (ZynqMP/SYNPS layout only).
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};
/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:	Correctable error count read from the controller.
 * @ue_cnt:	Uncorrectable error count read from the controller.
 * @ceinfo:	Correctable error log information.
 * @ueinfo:	Uncorrectable error log information.
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};
/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:		Base address of the DDR controller.
 * @reglock:		Concurrent CSRs access lock (guards the shared
 *			ECC clear/IRQ-enable register).
 * @message:		Buffer for framing the event specific info.
 * @stat:		ECC status information.
 * @p_data:		Platform data.
 * @ce_cnt:		Correctable Error count (accumulated).
 * @ue_cnt:		Uncorrectable Error count (accumulated).
 * @poison_addr:	Data poison address.
 * @row_shift:		Bit shifts for row bit.
 * @col_shift:		Bit shifts for column bit.
 * @bank_shift:		Bit shifts for bank bit.
 * @bankgrp_shift:	Bit shifts for bank group bit.
 * @rank_shift:		Bit shifts for rank bit.
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	spinlock_t reglock;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	/* Fields below exist only with CONFIG_EDAC_DEBUG (data-poison support). */
	ulong poison_addr;
	u32 row_shift[18];
	u32 col_shift[14];
	u32 bank_shift[3];
	u32 bankgrp_shift[2];
	u32 rank_shift[1];
#endif
};
/* Controller flavours supported by this driver; selects register layout. */
enum synps_platform_type {
	ZYNQ,
	ZYNQMP,
	SYNPS,
};
/**
 * struct synps_platform_data - synps platform data structure.
 * @platform:		Identifies the target hardware platform.
 * @get_error_info:	Get EDAC error info; returns 0 on error found,
 *			nonzero when there is nothing to report.
 * @get_mtype:		Get mtype (memory type) from controller registers.
 * @get_dtype:		Get dtype (device width) from controller registers.
 * @get_mem_info:	Get EDAC memory info (CONFIG_EDAC_DEBUG only).
 * @quirks:		To differentiate IPs (DDR_ECC_* quirk flags).
 */
struct synps_platform_data {
	enum synps_platform_type platform;
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
#ifdef CONFIG_EDAC_DEBUG
	u64 (*get_mem_info)(struct synps_edac_priv *priv);
#endif
	int quirks;
};
/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Reads the Zynq STAT/LOG/ADDR registers, fills priv->stat and clears the
 * logged errors in the controller.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* Decode the CE log only when the log is flagged valid. */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/* Pulse the clear bits, then release them. */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
  350. #ifdef CONFIG_EDAC_DEBUG
  351. /**
  352. * zynqmp_get_mem_info - Get the current memory info.
  353. * @priv: DDR memory controller private instance data.
  354. *
  355. * Return: host interface address.
  356. */
  357. static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv)
  358. {
  359. u64 hif_addr = 0, linear_addr;
  360. linear_addr = priv->poison_addr;
  361. if (linear_addr >= SZ_32G)
  362. linear_addr = linear_addr - SZ_32G + SZ_2G;
  363. hif_addr = linear_addr >> 3;
  364. return hif_addr;
  365. }
  366. #endif
/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Reads the ZynqMP/Synopsys enhanced DDRC error-count, address and syndrome
 * registers into priv->stat, then clears the error state.
 *
 * Return: one if there is no error otherwise returns zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval;
	unsigned long flags;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	/* CE count nonzero but status already empty: nothing to report. */
	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));
ue_err:
	if (!p->ue_cnt)
		goto out;

	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
	/*
	 * ECC_CLR also carries the interrupt-enable bits on self-clearing
	 * controllers (see enable_intr()), so read-modify-write it under
	 * reglock to avoid dropping the enables while pulsing the clears.
	 */
	spin_lock_irqsave(&priv->reglock, flags);
	clearval = readl(base + ECC_CLR_OFST) |
		   ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
		   ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);
	spin_unlock_irqrestore(&priv->reglock, flags);

	return 0;
}
/**
 * handle_error - Handle Correctable and Uncorrectable errors.
 * @mci:	EDAC memory controller instance.
 * @p:		Synopsys ECC status structure.
 *
 * Formats a message from the decoded error info and reports it to the EDAC
 * core. Interrupt-capable controllers log bank group/block numbers; the
 * others log the column instead. Resets @p when done.
 */
static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
{
	struct synps_edac_priv *priv = mci->pvt_info;
	struct ecc_error_info *pinf;

	if (p->ce_cnt) {
		pinf = &p->ceinfo;
		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
				 "CE", pinf->row, pinf->bank,
				 pinf->bankgrpnr, pinf->blknr,
				 pinf->bitpos, pinf->data);
		} else {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
				 "CE", pinf->row, pinf->bank, pinf->col,
				 pinf->bitpos, pinf->data);
		}

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	if (p->ue_cnt) {
		pinf = &p->ueinfo;
		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
				 "UE", pinf->row, pinf->bank,
				 pinf->bankgrpnr, pinf->blknr);
		} else {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type :%s Row %d Bank %d Col %d ",
				 "UE", pinf->row, pinf->bank, pinf->col);
		}

		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	/* Reported; reset accumulated status for the next scan. */
	memset(p, 0, sizeof(*p));
}
  470. static void enable_intr(struct synps_edac_priv *priv)
  471. {
  472. unsigned long flags;
  473. /* Enable UE/CE Interrupts */
  474. if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
  475. writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
  476. priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
  477. return;
  478. }
  479. spin_lock_irqsave(&priv->reglock, flags);
  480. writel(DDR_UE_MASK | DDR_CE_MASK,
  481. priv->baseaddr + ECC_CLR_OFST);
  482. spin_unlock_irqrestore(&priv->reglock, flags);
  483. }
  484. static void disable_intr(struct synps_edac_priv *priv)
  485. {
  486. unsigned long flags;
  487. /* Disable UE/CE Interrupts */
  488. if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
  489. writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
  490. priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
  491. return;
  492. }
  493. spin_lock_irqsave(&priv->reglock, flags);
  494. writel(0, priv->baseaddr + ECC_CLR_OFST);
  495. spin_unlock_irqrestore(&priv->reglock, flags);
  496. }
/**
 * intr_handler - Interrupt Handler for ECC interrupts.
 * @irq:	IRQ number.
 * @dev_id:	Device ID (the mem_ctl_info instance).
 *
 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
 */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	/*
	 * v3.0 of the controller has the ce/ue bits cleared automatically,
	 * so this condition does not apply.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
		/* Neither CE nor UE pending: not our interrupt. */
		if (!(regval & ECC_CE_UE_INTR_MASK))
			return IRQ_NONE;
	}

	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
	/*
	 * v3.0 of the controller does not have this register. Note that
	 * regval was only assigned on the non-self-clear path above, which
	 * is the only path that reaches this write-to-clear.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);

	return IRQ_HANDLED;
}
  535. /**
  536. * check_errors - Check controller for ECC errors.
  537. * @mci: EDAC memory controller instance.
  538. *
  539. * Check and post ECC errors. Called by the polling thread.
  540. */
  541. static void check_errors(struct mem_ctl_info *mci)
  542. {
  543. const struct synps_platform_data *p_data;
  544. struct synps_edac_priv *priv;
  545. int status;
  546. priv = mci->pvt_info;
  547. p_data = priv->p_data;
  548. status = p_data->get_error_info(priv);
  549. if (status)
  550. return;
  551. priv->ce_cnt += priv->stat.ce_cnt;
  552. priv->ue_cnt += priv->stat.ue_cnt;
  553. handle_error(mci, &priv->stat);
  554. edac_dbg(3, "Total error count CE %d UE %d\n",
  555. priv->ce_cnt, priv->ue_cnt);
  556. }
  557. /**
  558. * zynq_get_dtype - Return the controller memory width.
  559. * @base: DDR memory controller base address.
  560. *
  561. * Get the EDAC device type width appropriate for the current controller
  562. * configuration.
  563. *
  564. * Return: a device type width enumeration.
  565. */
  566. static enum dev_type zynq_get_dtype(const void __iomem *base)
  567. {
  568. enum dev_type dt;
  569. u32 width;
  570. width = readl(base + CTRL_OFST);
  571. width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
  572. switch (width) {
  573. case DDRCTL_WDTH_16:
  574. dt = DEV_X2;
  575. break;
  576. case DDRCTL_WDTH_32:
  577. dt = DEV_X4;
  578. break;
  579. default:
  580. dt = DEV_UNKNOWN;
  581. }
  582. return dt;
  583. }
  584. /**
  585. * zynqmp_get_dtype - Return the controller memory width.
  586. * @base: DDR memory controller base address.
  587. *
  588. * Get the EDAC device type width appropriate for the current controller
  589. * configuration.
  590. *
  591. * Return: a device type width enumeration.
  592. */
  593. static enum dev_type zynqmp_get_dtype(const void __iomem *base)
  594. {
  595. enum dev_type dt;
  596. u32 width;
  597. width = readl(base + CTRL_OFST);
  598. width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
  599. switch (width) {
  600. case DDRCTL_EWDTH_16:
  601. dt = DEV_X2;
  602. break;
  603. case DDRCTL_EWDTH_32:
  604. dt = DEV_X4;
  605. break;
  606. case DDRCTL_EWDTH_64:
  607. dt = DEV_X8;
  608. break;
  609. default:
  610. dt = DEV_UNKNOWN;
  611. }
  612. return dt;
  613. }
/**
 * get_ecc_state - Return the controller ECC enable/disable status.
 * @priv:	DDR memory controller private instance data.
 *
 * Checks that the device width is supported and the scrub mode is SEC/DED;
 * when it is, any stale error state is cleared before reporting enabled.
 *
 * Return: true if ECC is enabled, otherwise false.
 */
static bool get_ecc_state(struct synps_edac_priv *priv)
{
	u32 ecctype, clearval;
	enum dev_type dt;

	if (priv->p_data->platform == ZYNQ) {
		dt = zynq_get_dtype(priv->baseaddr);
		if (dt == DEV_UNKNOWN)
			return false;

		ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK;
		if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) {
			/* Pulse the CE/UE clear bits, then release them. */
			clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR;
			writel(clearval, priv->baseaddr + ECC_CTRL_OFST);
			writel(0x0, priv->baseaddr + ECC_CTRL_OFST);
			return true;
		}
	} else {
		dt = zynqmp_get_dtype(priv->baseaddr);
		if (dt == DEV_UNKNOWN)
			return false;

		ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
		if (ecctype == SCRUB_MODE_SECDED &&
		    (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) {
			/* Preserve other bits in the shared clear register. */
			clearval = readl(priv->baseaddr + ECC_CLR_OFST) |
				   ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
				   ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
			writel(clearval, priv->baseaddr + ECC_CLR_OFST);
			return true;
		}
	}

	return false;
}
  645. /**
  646. * get_memsize - Read the size of the attached memory device.
  647. *
  648. * Return: the memory size in bytes.
  649. */
  650. static u32 get_memsize(void)
  651. {
  652. struct sysinfo inf;
  653. si_meminfo(&inf);
  654. return inf.totalram * inf.mem_unit;
  655. }
  656. /**
  657. * zynq_get_mtype - Return the controller memory type.
  658. * @base: Synopsys ECC status structure.
  659. *
  660. * Get the EDAC memory type appropriate for the current controller
  661. * configuration.
  662. *
  663. * Return: a memory type enumeration.
  664. */
  665. static enum mem_type zynq_get_mtype(const void __iomem *base)
  666. {
  667. enum mem_type mt;
  668. u32 memtype;
  669. memtype = readl(base + T_ZQ_OFST);
  670. if (memtype & T_ZQ_DDRMODE_MASK)
  671. mt = MEM_DDR3;
  672. else
  673. mt = MEM_DDR2;
  674. return mt;
  675. }
  676. /**
  677. * zynqmp_get_mtype - Returns controller memory type.
  678. * @base: Synopsys ECC status structure.
  679. *
  680. * Get the EDAC memory type appropriate for the current controller
  681. * configuration.
  682. *
  683. * Return: a memory type enumeration.
  684. */
  685. static enum mem_type zynqmp_get_mtype(const void __iomem *base)
  686. {
  687. enum mem_type mt;
  688. u32 memtype;
  689. memtype = readl(base + CTRL_OFST);
  690. if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
  691. mt = MEM_DDR3;
  692. else if (memtype & MEM_TYPE_DDR2)
  693. mt = MEM_RDDR2;
  694. else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
  695. mt = MEM_DDR4;
  696. else
  697. mt = MEM_EMPTY;
  698. return mt;
  699. }
  700. /**
  701. * init_csrows - Initialize the csrow data.
  702. * @mci: EDAC memory controller instance.
  703. *
  704. * Initialize the chip select rows associated with the EDAC memory
  705. * controller instance.
  706. */
  707. static void init_csrows(struct mem_ctl_info *mci)
  708. {
  709. struct synps_edac_priv *priv = mci->pvt_info;
  710. const struct synps_platform_data *p_data;
  711. struct csrow_info *csi;
  712. struct dimm_info *dimm;
  713. u32 size, row;
  714. int j;
  715. p_data = priv->p_data;
  716. for (row = 0; row < mci->nr_csrows; row++) {
  717. csi = mci->csrows[row];
  718. size = get_memsize();
  719. for (j = 0; j < csi->nr_channels; j++) {
  720. dimm = csi->channels[j]->dimm;
  721. dimm->edac_mode = EDAC_SECDED;
  722. dimm->mtype = p_data->get_mtype(priv->baseaddr);
  723. dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
  724. dimm->grain = SYNPS_EDAC_ERR_GRAIN;
  725. dimm->dtype = p_data->get_dtype(priv->baseaddr);
  726. }
  727. }
  728. }
/**
 * mc_init - Initialize one driver instance.
 * @mci:	EDAC memory controller instance.
 * @pdev:	platform device.
 *
 * Perform initialization of the EDAC memory controller instance and
 * related driver-private data associated with the memory controller the
 * instance is bound to.
 */
static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
{
	struct synps_edac_priv *priv;

	mci->pdev = &pdev->dev;
	priv = mci->pvt_info;
	platform_set_drvdata(pdev, mci);

	/* Initialize controller capabilities and configuration */
	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->scrub_cap = SCRUB_HW_SRC;
	mci->scrub_mode = SCRUB_NONE;

	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->ctl_name = "synps_ddr_controller";
	mci->dev_name = SYNPS_EDAC_MOD_STRING;
	mci->mod_name = SYNPS_EDAC_MOD_VER;

	/*
	 * Interrupt-capable controllers report from the ISR; the rest are
	 * polled via edac_check. Note this writes the subsystem-global
	 * edac_op_state.
	 */
	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
		edac_op_state = EDAC_OPSTATE_INT;
	} else {
		edac_op_state = EDAC_OPSTATE_POLL;
		mci->edac_check = check_errors;
	}

	mci->ctl_page_to_phys = NULL;

	init_csrows(mci);
}
  762. static int setup_irq(struct mem_ctl_info *mci,
  763. struct platform_device *pdev)
  764. {
  765. struct synps_edac_priv *priv = mci->pvt_info;
  766. int ret, irq;
  767. irq = platform_get_irq(pdev, 0);
  768. if (irq < 0) {
  769. edac_printk(KERN_ERR, EDAC_MC,
  770. "No IRQ %d in DT\n", irq);
  771. return irq;
  772. }
  773. ret = devm_request_irq(&pdev->dev, irq, intr_handler,
  774. 0, dev_name(&pdev->dev), mci);
  775. if (ret < 0) {
  776. edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
  777. return ret;
  778. }
  779. enable_intr(priv);
  780. return 0;
  781. }
/* Zynq DDRC: polled ECC reporting, no quirks. */
static const struct synps_platform_data zynq_edac_def = {
	.platform = ZYNQ,
	.get_error_info = zynq_get_error_info,
	.get_mtype = zynq_get_mtype,
	.get_dtype = zynq_get_dtype,
	.quirks = 0,
};
/*
 * ZynqMP DDRC: interrupt-driven ECC reporting; data-poison injection
 * support only in CONFIG_EDAC_DEBUG builds.
 */
static const struct synps_platform_data zynqmp_edac_def = {
	.platform = ZYNQMP,
	.get_error_info = zynqmp_get_error_info,
	.get_mtype = zynqmp_get_mtype,
	.get_dtype = zynqmp_get_dtype,
#ifdef CONFIG_EDAC_DEBUG
	.get_mem_info = zynqmp_get_mem_info,
#endif
	.quirks = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			| DDR_ECC_DATA_POISON_SUPPORT
#endif
			),
};
/*
 * Generic Synopsys DDRC 3.80a: reuses the ZynqMP accessors; interrupt
 * status bits self-clear on this IP (DDR_ECC_INTR_SELF_CLEAR quirk).
 */
static const struct synps_platform_data synopsys_edac_def = {
	.platform = SYNPS,
	.get_error_info = zynqmp_get_error_info,
	.get_mtype = zynqmp_get_mtype,
	.get_dtype = zynqmp_get_dtype,
	.quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			| DDR_ECC_DATA_POISON_SUPPORT
#endif
			),
};
/* DT match table: selects the per-SoC platform data above. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};
MODULE_DEVICE_TABLE(of, synps_edac_match);
#ifdef CONFIG_EDAC_DEBUG
/* Map the struct device embedded in mem_ctl_info back to its mci. */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
/**
 * ddr_poison_setup - Update poison registers.
 * @priv: DDR memory controller private instance data.
 *
 * Decompose the poison target address into its row / column / bank /
 * bank-group / rank bit fields using the per-bit shift tables built by
 * setup_address_map(), then program ECC_POISON0/ECC_POISON1 accordingly.
 *
 * Return: none.
 */
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
	const struct synps_platform_data *p_data;
	int index;
	ulong hif_addr = 0;

	p_data = priv->p_data;

	/* Without a get_mem_info hook the HIF address is poison_addr >> 3. */
	if (p_data->get_mem_info)
		hif_addr = p_data->get_mem_info(priv);
	else
		hif_addr = priv->poison_addr >> 3;

	/* A zero shift entry terminates the list of valid row bits. */
	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
		if (priv->row_shift[index])
			row |= (((hif_addr >> priv->row_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/* Column bits 0-2 are taken even when their shift entry is zero. */
	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
		if (priv->col_shift[index] || index < 3)
			col |= (((hif_addr >> priv->col_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
		if (priv->bank_shift[index])
			bank |= (((hif_addr >> priv->bank_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
		if (priv->bankgrp_shift[index])
			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
						& BIT(0)) << index);
		else
			break;
	}

	/* At most one rank bit is supported. */
	if (priv->rank_shift[0])
		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);

	/* POISON0 carries rank + column; POISON1 bank group + bank + row. */
	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
	writel(regval, priv->baseaddr + ECC_POISON0_OFST);

	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}
/*
 * inject_data_error sysfs "show": dump the raw ECC_POISON0/1 register
 * contents and the last poison address written via the _store hook.
 */
static ssize_t inject_data_error_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
			"Error injection Address: 0x%lx\n\r",
			readl(priv->baseaddr + ECC_POISON0_OFST),
			readl(priv->baseaddr + ECC_POISON1_OFST),
			priv->poison_addr);
}
  902. static ssize_t inject_data_error_store(struct device *dev,
  903. struct device_attribute *mattr,
  904. const char *data, size_t count)
  905. {
  906. struct mem_ctl_info *mci = to_mci(dev);
  907. struct synps_edac_priv *priv = mci->pvt_info;
  908. if (kstrtoul(data, 0, &priv->poison_addr))
  909. return -EINVAL;
  910. ddr_poison_setup(priv);
  911. return count;
  912. }
/*
 * inject_data_poison sysfs "show": report whether ECC_CFG1 is set up for
 * correctable (low two bits == 0x3) or uncorrectable error injection.
 */
static ssize_t inject_data_poison_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Data Poisoning: %s\n\r",
			(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
			? ("Correctable Error") : ("UnCorrectable Error"));
}
  923. static ssize_t inject_data_poison_store(struct device *dev,
  924. struct device_attribute *mattr,
  925. const char *data, size_t count)
  926. {
  927. struct mem_ctl_info *mci = to_mci(dev);
  928. struct synps_edac_priv *priv = mci->pvt_info;
  929. writel(0, priv->baseaddr + DDRC_SWCTL);
  930. if (strncmp(data, "CE", 2) == 0)
  931. writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
  932. else
  933. writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
  934. writel(1, priv->baseaddr + DDRC_SWCTL);
  935. return count;
  936. }
/* Writable debug attributes exposed under the mci device in sysfs. */
static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
  939. static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
  940. {
  941. int rc;
  942. rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
  943. if (rc < 0)
  944. return rc;
  945. rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
  946. if (rc < 0)
  947. return rc;
  948. return 0;
  949. }
/* Tear down the error-injection sysfs attributes created above. */
static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
{
	device_remove_file(&mci->dev, &dev_attr_inject_data_error);
	device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
}
/*
 * Build priv->row_shift[0..17] (HIF address bit used for each row bit)
 * from ADDRMAP5/6/7 and, when needed, ADDRMAP9/10/11.
 */
static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 addrmap_row_b2_10;
	int index;

	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
	priv->row_shift[1] = ((addrmap[5] >> 8) &
			ROW_MAX_VAL_MASK) + ROW_B1_BASE;

	/*
	 * Rows 2-10 either share one packed offset in ADDRMAP5, or — when
	 * that field holds the all-ones sentinel — are encoded per-bit in
	 * ADDRMAP9/10/11.
	 */
	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
		for (index = 2; index < 11; index++)
			priv->row_shift[index] = addrmap_row_b2_10 +
				index + ROW_B0_BASE;
	} else {
		priv->row_shift[2] = (addrmap[9] &
				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
		priv->row_shift[3] = ((addrmap[9] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
		priv->row_shift[4] = ((addrmap[9] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
		priv->row_shift[5] = ((addrmap[9] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
		priv->row_shift[6] = (addrmap[10] &
				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
		priv->row_shift[7] = ((addrmap[10] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
		priv->row_shift[8] = ((addrmap[10] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
		priv->row_shift[9] = ((addrmap[10] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
		priv->row_shift[10] = (addrmap[11] &
				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
	}

	/* Rows 11-17: an all-ones field means "bit unused" -> shift 0. */
	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
}
/*
 * Build priv->col_shift[] from ADDRMAP2/3/4; the mapping of the upper
 * column bits depends on the external bus width and on whether the
 * memory is LPDDR3 (both read from the CTRL register).
 */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 always map straight through. */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	/* For bits 4-9 an all-ones field means "unused" -> shift 0. */
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);

	/*
	 * Upper column bits: the source register/byte and the destination
	 * index (10/11/13) vary with bus width and LPDDR3.
	 * NOTE(review): non-LPDDR3 branches populate index 13 and leave 12
	 * untouched — appears intentional (bit unused for that config), but
	 * confirm against the DDRC ADDRMAP documentation.
	 */
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	/* Shift entries down by the bus-width code for narrow buses. */
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}
}
/* Build priv->bank_shift[0..2] from ADDRMAP1 (all-ones field -> 0). */
static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
	priv->bank_shift[1] = ((addrmap[1] >> 8) &
				BANK_MAX_VAL_MASK) + BANK_B1_BASE;
	priv->bank_shift[2] = (((addrmap[1] >> 16) &
				BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
				(((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
				 BANK_B2_BASE);
}
/* Build priv->bankgrp_shift[0..1] from ADDRMAP8 (all-ones field -> 0). */
static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->bankgrp_shift[0] = (addrmap[8] &
				BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
	priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
				BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
				& BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
}
/* Build priv->rank_shift[0] from ADDRMAP0 (all-ones field -> 0). */
static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
				RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
				RANK_MAX_VAL_MASK) + RANK_B0_BASE);
}
  1134. /**
  1135. * setup_address_map - Set Address Map by querying ADDRMAP registers.
  1136. * @priv: DDR memory controller private instance data.
  1137. *
  1138. * Set Address Map by querying ADDRMAP registers.
  1139. *
  1140. * Return: none.
  1141. */
  1142. static void setup_address_map(struct synps_edac_priv *priv)
  1143. {
  1144. u32 addrmap[12];
  1145. int index;
  1146. for (index = 0; index < 12; index++) {
  1147. u32 addrmap_offset;
  1148. addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
  1149. addrmap[index] = readl(priv->baseaddr + addrmap_offset);
  1150. }
  1151. setup_row_address_map(priv, addrmap);
  1152. setup_column_address_map(priv, addrmap);
  1153. setup_bank_address_map(priv, addrmap);
  1154. setup_bg_address_map(priv, addrmap);
  1155. setup_rank_address_map(priv, addrmap);
  1156. }
  1157. #endif /* CONFIG_EDAC_DEBUG */
  1158. /**
  1159. * mc_probe - Check controller and bind driver.
  1160. * @pdev: platform device.
  1161. *
  1162. * Probe a specific controller instance for binding with the driver.
  1163. *
  1164. * Return: 0 if the controller instance was successfully bound to the
  1165. * driver; otherwise, < 0 on error.
  1166. */
  1167. static int mc_probe(struct platform_device *pdev)
  1168. {
  1169. const struct synps_platform_data *p_data;
  1170. struct edac_mc_layer layers[2];
  1171. struct synps_edac_priv *priv;
  1172. struct mem_ctl_info *mci;
  1173. void __iomem *baseaddr;
  1174. int rc;
  1175. baseaddr = devm_platform_ioremap_resource(pdev, 0);
  1176. if (IS_ERR(baseaddr))
  1177. return PTR_ERR(baseaddr);
  1178. p_data = of_device_get_match_data(&pdev->dev);
  1179. if (!p_data)
  1180. return -ENODEV;
  1181. layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
  1182. layers[0].size = SYNPS_EDAC_NR_CSROWS;
  1183. layers[0].is_virt_csrow = true;
  1184. layers[1].type = EDAC_MC_LAYER_CHANNEL;
  1185. layers[1].size = SYNPS_EDAC_NR_CHANS;
  1186. layers[1].is_virt_csrow = false;
  1187. mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
  1188. sizeof(struct synps_edac_priv));
  1189. if (!mci) {
  1190. edac_printk(KERN_ERR, EDAC_MC,
  1191. "Failed memory allocation for mc instance\n");
  1192. return -ENOMEM;
  1193. }
  1194. priv = mci->pvt_info;
  1195. priv->baseaddr = baseaddr;
  1196. priv->p_data = p_data;
  1197. if (!get_ecc_state(priv)) {
  1198. edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
  1199. rc = -ENODEV;
  1200. goto free_edac_mc;
  1201. }
  1202. spin_lock_init(&priv->reglock);
  1203. mc_init(mci, pdev);
  1204. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
  1205. rc = setup_irq(mci, pdev);
  1206. if (rc)
  1207. goto free_edac_mc;
  1208. }
  1209. rc = edac_mc_add_mc(mci);
  1210. if (rc) {
  1211. edac_printk(KERN_ERR, EDAC_MC,
  1212. "Failed to register with EDAC core\n");
  1213. goto free_edac_mc;
  1214. }
  1215. #ifdef CONFIG_EDAC_DEBUG
  1216. if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
  1217. rc = edac_create_sysfs_attributes(mci);
  1218. if (rc) {
  1219. edac_printk(KERN_ERR, EDAC_MC,
  1220. "Failed to create sysfs entries\n");
  1221. goto free_edac_mc;
  1222. }
  1223. }
  1224. if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
  1225. setup_address_map(priv);
  1226. #endif
  1227. /*
  1228. * Start capturing the correctable and uncorrectable errors. A write of
  1229. * 0 starts the counters.
  1230. */
  1231. if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
  1232. writel(0x0, baseaddr + ECC_CTRL_OFST);
  1233. return rc;
  1234. free_edac_mc:
  1235. edac_mc_free(mci);
  1236. return rc;
  1237. }
/**
 * mc_remove - Unbind driver from controller.
 * @pdev: Platform device.
 *
 * Quiesce interrupts (if used), remove the debug sysfs attributes, and
 * unregister/free the EDAC memory controller instance.
 */
static void mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
	struct synps_edac_priv *priv = mci->pvt_info;

	/* Mask the ECC interrupt before tearing the device down. */
	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
		disable_intr(priv);

#ifdef CONFIG_EDAC_DEBUG
	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
		edac_remove_sysfs_attributes(mci);
#endif

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
}
/* Platform driver glue: bind to the DT compatibles listed above. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		.name = "synopsys-edac",
		.of_match_table = synps_edac_match,
	},
	.probe = mc_probe,
	.remove_new = mc_remove,
};

module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");