/* xilinx_sdfec.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Xilinx SDFEC
  4. *
  5. * Copyright (C) 2019 Xilinx, Inc.
  6. *
  7. * Description:
  8. * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
  9. * IP. It exposes a char device which supports file operations
  10. * like open(), close() and ioctl().
  11. */
  12. #include <linux/miscdevice.h>
  13. #include <linux/io.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/of.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/poll.h>
  20. #include <linux/slab.h>
  21. #include <linux/clk.h>
  22. #include <linux/compat.h>
  23. #include <linux/highmem.h>
  24. #include <uapi/misc/xilinx_sdfec.h>
  25. #define DEV_NAME_LEN 12
  26. static DEFINE_IDA(dev_nrs);
  27. /* Xilinx SDFEC Register Map */
  28. /* CODE_WRI_PROTECT Register */
  29. #define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
  30. /* ACTIVE Register */
  31. #define XSDFEC_ACTIVE_ADDR (0x8)
  32. #define XSDFEC_IS_ACTIVITY_SET (0x1)
  33. /* AXIS_WIDTH Register */
  34. #define XSDFEC_AXIS_WIDTH_ADDR (0xC)
  35. #define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
  36. #define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
  37. #define XSDFEC_AXIS_DIN_WORDS_LSB (2)
  38. #define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
  39. /* AXIS_ENABLE Register */
  40. #define XSDFEC_AXIS_ENABLE_ADDR (0x10)
  41. #define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
  42. #define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
  43. #define XSDFEC_AXIS_ENABLE_MASK \
  44. (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
  45. /* FEC_CODE Register */
  46. #define XSDFEC_FEC_CODE_ADDR (0x14)
  47. /* ORDER Register Map */
  48. #define XSDFEC_ORDER_ADDR (0x18)
  49. /* Interrupt Status Register */
  50. #define XSDFEC_ISR_ADDR (0x1C)
  51. /* Interrupt Status Register Bit Mask */
  52. #define XSDFEC_ISR_MASK (0x3F)
  53. /* Write Only - Interrupt Enable Register */
  54. #define XSDFEC_IER_ADDR (0x20)
  55. /* Write Only - Interrupt Disable Register */
  56. #define XSDFEC_IDR_ADDR (0x24)
  57. /* Read Only - Interrupt Mask Register */
  58. #define XSDFEC_IMR_ADDR (0x28)
  59. /* ECC Interrupt Status Register */
  60. #define XSDFEC_ECC_ISR_ADDR (0x2C)
  61. /* Single Bit Errors */
  62. #define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
  63. /* PL Initialize Single Bit Errors */
  64. #define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
  65. /* Multi Bit Errors */
  66. #define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
  67. /* PL Initialize Multi Bit Errors */
  68. #define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
  69. /* Multi Bit Error to Event Shift */
  70. #define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
  71. /* PL Initialize Multi Bit Error to Event Shift */
  72. #define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
  73. /* ECC Interrupt Status Bit Mask */
  74. #define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
  75. /* ECC Interrupt Status PL Initialize Bit Mask */
  76. #define XSDFEC_PL_INIT_ECC_ISR_MASK \
  77. (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
  78. /* ECC Interrupt Status All Bit Mask */
  79. #define XSDFEC_ALL_ECC_ISR_MASK \
  80. (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
  81. /* ECC Interrupt Status Single Bit Errors Mask */
  82. #define XSDFEC_ALL_ECC_ISR_SBE_MASK \
  83. (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
  84. /* ECC Interrupt Status Multi Bit Errors Mask */
  85. #define XSDFEC_ALL_ECC_ISR_MBE_MASK \
  86. (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
  87. /* Write Only - ECC Interrupt Enable Register */
  88. #define XSDFEC_ECC_IER_ADDR (0x30)
  89. /* Write Only - ECC Interrupt Disable Register */
  90. #define XSDFEC_ECC_IDR_ADDR (0x34)
  91. /* Read Only - ECC Interrupt Mask Register */
  92. #define XSDFEC_ECC_IMR_ADDR (0x38)
  93. /* BYPASS Register */
  94. #define XSDFEC_BYPASS_ADDR (0x3C)
  95. /* Turbo Code Register */
  96. #define XSDFEC_TURBO_ADDR (0x100)
  97. #define XSDFEC_TURBO_SCALE_MASK (0xFFF)
  98. #define XSDFEC_TURBO_SCALE_BIT_POS (8)
  99. #define XSDFEC_TURBO_SCALE_MAX (15)
  100. /* REG0 Register */
  101. #define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
  102. #define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
  103. #define XSDFEC_REG0_N_MIN (4)
  104. #define XSDFEC_REG0_N_MAX (32768)
  105. #define XSDFEC_REG0_N_MUL_P (256)
  106. #define XSDFEC_REG0_N_LSB (0)
  107. #define XSDFEC_REG0_K_MIN (2)
  108. #define XSDFEC_REG0_K_MAX (32766)
  109. #define XSDFEC_REG0_K_MUL_P (256)
  110. #define XSDFEC_REG0_K_LSB (16)
  111. /* REG1 Register */
  112. #define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
  113. #define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
  114. #define XSDFEC_REG1_PSIZE_MIN (2)
  115. #define XSDFEC_REG1_PSIZE_MAX (512)
  116. #define XSDFEC_REG1_NO_PACKING_MASK (0x400)
  117. #define XSDFEC_REG1_NO_PACKING_LSB (10)
  118. #define XSDFEC_REG1_NM_MASK (0xFF800)
  119. #define XSDFEC_REG1_NM_LSB (11)
  120. #define XSDFEC_REG1_BYPASS_MASK (0x100000)
  121. /* REG2 Register */
  122. #define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
  123. #define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
  124. #define XSDFEC_REG2_NLAYERS_MIN (1)
  125. #define XSDFEC_REG2_NLAYERS_MAX (256)
  126. #define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
  127. #define XSDFEC_REG2_NMQC_LSB (9)
  128. #define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
  129. #define XSDFEC_REG2_NORM_TYPE_LSB (20)
  130. #define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
  131. #define XSDFEC_REG2_SPEICAL_QC_LSB (21)
  132. #define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
  133. #define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
  134. #define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
  135. #define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
  136. /* REG3 Register */
  137. #define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
  138. #define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
  139. #define XSDFEC_REG3_LA_OFF_LSB (8)
  140. #define XSDFEC_REG3_QC_OFF_LSB (16)
  141. #define XSDFEC_LDPC_REG_JUMP (0x10)
  142. #define XSDFEC_REG_WIDTH_JUMP (4)
  143. /* The maximum number of pinned pages */
  144. #define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
/**
 * struct xsdfec_clks - For managing SD-FEC clocks
 * @core_clk: Main processing clock for core
 * @axi_clk: AXI4-Lite memory-mapped clock
 * @din_words_clk: DIN Words AXI4-Stream Slave clock
 * @din_clk: DIN AXI4-Stream Slave clock
 * @dout_clk: DOUT AXI4-Stream Slave clock
 * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
 * @ctrl_clk: Control AXI4-Stream Slave clock
 * @status_clk: Status AXI4-Stream Slave clock
 */
struct xsdfec_clks {
	struct clk *core_clk;
	struct clk *axi_clk;
	struct clk *din_words_clk;
	struct clk *din_clk;
	struct clk *dout_clk;
	struct clk *dout_words_clk;
	struct clk *ctrl_clk;
	struct clk *status_clk;
};
/**
 * struct xsdfec_dev - Driver data for SDFEC
 * @miscdev: Misc device handle
 * @clks: Clocks managed by the SDFEC driver
 * @waitq: Driver wait queue
 * @config: Configuration of the SDFEC device
 * @dev_name: Device name
 * @flags: IRQ flags saved by spin_lock_irqsave() on @error_data_lock
 * @regs: device physical base address
 * @dev: pointer to device struct
 * @state: State of the SDFEC device
 * @error_data_lock: Error counter and states spinlock
 * @dev_id: Device ID
 * @isr_err_count: Count of ISR errors
 * @cecc_count: Count of Correctable ECC errors (SBE)
 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
 * @irq: IRQ number
 * @state_updated: indicates State updated by interrupt handler
 * @stats_updated: indicates Stats updated by interrupt handler
 * @intr_enabled: indicates IRQ enabled
 *
 * This structure contains necessary state for SDFEC driver to operate
 */
struct xsdfec_dev {
	struct miscdevice miscdev;
	struct xsdfec_clks clks;
	wait_queue_head_t waitq;
	struct xsdfec_config config;
	char dev_name[DEV_NAME_LEN];
	unsigned long flags;
	void __iomem *regs;
	struct device *dev;
	enum xsdfec_state state;
	/* Spinlock to protect state_updated and stats_updated */
	spinlock_t error_data_lock;
	int dev_id;
	u32 isr_err_count;
	u32 cecc_count;
	u32 uecc_count;
	int irq;
	bool state_updated;
	bool stats_updated;
	bool intr_enabled;
};
  210. static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
  211. u32 value)
  212. {
  213. dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
  214. iowrite32(value, xsdfec->regs + addr);
  215. }
  216. static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
  217. {
  218. u32 rval;
  219. rval = ioread32(xsdfec->regs + addr);
  220. dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
  221. return rval;
  222. }
  223. static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
  224. u32 reg_offset, u32 bit_num,
  225. char *config_value)
  226. {
  227. u32 reg_val;
  228. u32 bit_mask = 1 << bit_num;
  229. reg_val = xsdfec_regread(xsdfec, reg_offset);
  230. *config_value = (reg_val & bit_mask) > 0;
  231. }
  232. static void update_config_from_hw(struct xsdfec_dev *xsdfec)
  233. {
  234. u32 reg_value;
  235. bool sdfec_started;
  236. /* Update the Order */
  237. reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
  238. xsdfec->config.order = reg_value;
  239. update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
  240. 0, /* Bit Number, maybe change to mask */
  241. &xsdfec->config.bypass);
  242. update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
  243. 0, /* Bit Number */
  244. &xsdfec->config.code_wr_protect);
  245. reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
  246. xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;
  247. reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
  248. xsdfec->config.irq.enable_ecc_isr =
  249. (reg_value & XSDFEC_ECC_ISR_MASK) > 0;
  250. reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
  251. sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
  252. if (sdfec_started)
  253. xsdfec->state = XSDFEC_STARTED;
  254. else
  255. xsdfec->state = XSDFEC_STOPPED;
  256. }
/*
 * XSDFEC_GET_STATUS ioctl backend: copy the current device state and
 * activity flag to user space, clearing the state_updated latch.
 * Returns 0 on success, -EFAULT if the user copy fails.
 */
static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_status status;
	int err;

	/*
	 * Zero the whole struct, padding included, so no uninitialized
	 * kernel stack bytes can reach user space via copy_to_user().
	 */
	memset(&status, 0, sizeof(status));
	/* state and state_updated are shared with the IRQ handler */
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	status.state = xsdfec->state;
	xsdfec->state_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
			   XSDFEC_IS_ACTIVITY_SET);
	err = copy_to_user(arg, &status, sizeof(status));
	if (err)
		err = -EFAULT;
	return err;
}
  273. static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
  274. {
  275. int err;
  276. err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
  277. if (err)
  278. err = -EFAULT;
  279. return err;
  280. }
  281. static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
  282. {
  283. u32 mask_read;
  284. if (enable) {
  285. /* Enable */
  286. xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
  287. mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
  288. if (mask_read & XSDFEC_ISR_MASK) {
  289. dev_dbg(xsdfec->dev,
  290. "SDFEC enabling irq with IER failed");
  291. return -EIO;
  292. }
  293. } else {
  294. /* Disable */
  295. xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
  296. mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
  297. if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
  298. dev_dbg(xsdfec->dev,
  299. "SDFEC disabling irq with IDR failed");
  300. return -EIO;
  301. }
  302. }
  303. return 0;
  304. }
  305. static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
  306. {
  307. u32 mask_read;
  308. if (enable) {
  309. /* Enable */
  310. xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
  311. XSDFEC_ALL_ECC_ISR_MASK);
  312. mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
  313. if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
  314. dev_dbg(xsdfec->dev,
  315. "SDFEC enabling ECC irq with ECC IER failed");
  316. return -EIO;
  317. }
  318. } else {
  319. /* Disable */
  320. xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
  321. XSDFEC_ALL_ECC_ISR_MASK);
  322. mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
  323. if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
  324. XSDFEC_ECC_ISR_MASK) ||
  325. ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
  326. XSDFEC_PL_INIT_ECC_ISR_MASK))) {
  327. dev_dbg(xsdfec->dev,
  328. "SDFEC disable ECC irq with ECC IDR failed");
  329. return -EIO;
  330. }
  331. }
  332. return 0;
  333. }
  334. static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
  335. {
  336. struct xsdfec_irq irq;
  337. int err;
  338. int isr_err;
  339. int ecc_err;
  340. err = copy_from_user(&irq, arg, sizeof(irq));
  341. if (err)
  342. return -EFAULT;
  343. /* Setup tlast related IRQ */
  344. isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
  345. if (!isr_err)
  346. xsdfec->config.irq.enable_isr = irq.enable_isr;
  347. /* Setup ECC related IRQ */
  348. ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
  349. if (!ecc_err)
  350. xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
  351. if (isr_err < 0 || ecc_err < 0)
  352. err = -EIO;
  353. return err;
  354. }
  355. static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
  356. {
  357. struct xsdfec_turbo turbo;
  358. int err;
  359. u32 turbo_write;
  360. err = copy_from_user(&turbo, arg, sizeof(turbo));
  361. if (err)
  362. return -EFAULT;
  363. if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
  364. return -EINVAL;
  365. if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
  366. return -EINVAL;
  367. /* Check to see what device tree says about the FEC codes */
  368. if (xsdfec->config.code == XSDFEC_LDPC_CODE)
  369. return -EIO;
  370. turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
  371. << XSDFEC_TURBO_SCALE_BIT_POS) |
  372. turbo.alg;
  373. xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
  374. return err;
  375. }
  376. static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
  377. {
  378. u32 reg_value;
  379. struct xsdfec_turbo turbo_params;
  380. int err;
  381. if (xsdfec->config.code == XSDFEC_LDPC_CODE)
  382. return -EIO;
  383. memset(&turbo_params, 0, sizeof(turbo_params));
  384. reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
  385. turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
  386. XSDFEC_TURBO_SCALE_BIT_POS;
  387. turbo_params.alg = reg_value & 0x1;
  388. err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
  389. if (err)
  390. err = -EFAULT;
  391. return err;
  392. }
  393. static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
  394. u32 offset)
  395. {
  396. u32 wdata;
  397. if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
  398. (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
  399. dev_dbg(xsdfec->dev, "N value is not in range");
  400. return -EINVAL;
  401. }
  402. n <<= XSDFEC_REG0_N_LSB;
  403. if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
  404. (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
  405. dev_dbg(xsdfec->dev, "K value is not in range");
  406. return -EINVAL;
  407. }
  408. k = k << XSDFEC_REG0_K_LSB;
  409. wdata = k | n;
  410. if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
  411. XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
  412. dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
  413. XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
  414. (offset * XSDFEC_LDPC_REG_JUMP));
  415. return -EINVAL;
  416. }
  417. xsdfec_regwrite(xsdfec,
  418. XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
  419. (offset * XSDFEC_LDPC_REG_JUMP),
  420. wdata);
  421. return 0;
  422. }
  423. static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
  424. u32 no_packing, u32 nm, u32 offset)
  425. {
  426. u32 wdata;
  427. if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
  428. dev_dbg(xsdfec->dev, "Psize is not in range");
  429. return -EINVAL;
  430. }
  431. if (no_packing != 0 && no_packing != 1)
  432. dev_dbg(xsdfec->dev, "No-packing bit register invalid");
  433. no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
  434. XSDFEC_REG1_NO_PACKING_MASK);
  435. if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
  436. dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
  437. nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
  438. wdata = nm | no_packing | psize;
  439. if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
  440. XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
  441. dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
  442. XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
  443. (offset * XSDFEC_LDPC_REG_JUMP));
  444. return -EINVAL;
  445. }
  446. xsdfec_regwrite(xsdfec,
  447. XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
  448. (offset * XSDFEC_LDPC_REG_JUMP),
  449. wdata);
  450. return 0;
  451. }
  452. static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
  453. u32 norm_type, u32 special_qc, u32 no_final_parity,
  454. u32 max_schedule, u32 offset)
  455. {
  456. u32 wdata;
  457. if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
  458. nlayers > XSDFEC_REG2_NLAYERS_MAX) {
  459. dev_dbg(xsdfec->dev, "Nlayers is not in range");
  460. return -EINVAL;
  461. }
  462. if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
  463. dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
  464. nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
  465. if (norm_type > 1)
  466. dev_dbg(xsdfec->dev, "Norm type is invalid");
  467. norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
  468. XSDFEC_REG2_NORM_TYPE_MASK);
  469. if (special_qc > 1)
  470. dev_dbg(xsdfec->dev, "Special QC in invalid");
  471. special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
  472. XSDFEC_REG2_SPECIAL_QC_MASK);
  473. if (no_final_parity > 1)
  474. dev_dbg(xsdfec->dev, "No final parity check invalid");
  475. no_final_parity =
  476. ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
  477. XSDFEC_REG2_NO_FINAL_PARITY_MASK);
  478. if (max_schedule &
  479. ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
  480. dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
  481. max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
  482. XSDFEC_REG2_MAX_SCHEDULE_MASK);
  483. wdata = (max_schedule | no_final_parity | special_qc | norm_type |
  484. nmqc | nlayers);
  485. if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
  486. XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
  487. dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
  488. XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
  489. (offset * XSDFEC_LDPC_REG_JUMP));
  490. return -EINVAL;
  491. }
  492. xsdfec_regwrite(xsdfec,
  493. XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
  494. (offset * XSDFEC_LDPC_REG_JUMP),
  495. wdata);
  496. return 0;
  497. }
  498. static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
  499. u16 qc_off, u32 offset)
  500. {
  501. u32 wdata;
  502. wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
  503. (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
  504. if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
  505. XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
  506. dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
  507. XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
  508. (offset * XSDFEC_LDPC_REG_JUMP));
  509. return -EINVAL;
  510. }
  511. xsdfec_regwrite(xsdfec,
  512. XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
  513. (offset * XSDFEC_LDPC_REG_JUMP),
  514. wdata);
  515. return 0;
  516. }
/*
 * Copy @len 32-bit words from the user buffer @src_ptr into the device
 * table starting at @base_addr + @offset words, bounded by @depth bytes.
 * The user buffer is pinned page by page and written through MMIO.
 * Returns 0 on success, -EINVAL on range or pinning failure.
 */
static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
			      u32 *src_ptr, u32 len, const u32 base_addr,
			      const u32 depth)
{
	u32 reg = 0;
	int res, i, nr_pages;
	u32 n;
	u32 *addr = NULL;
	struct page *pages[MAX_NUM_PAGES];

	/*
	 * Writes that go beyond the length of
	 * Shared Scale(SC) table should fail
	 */
	if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
	    len > depth / XSDFEC_REG_WIDTH_JUMP ||
	    offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
		dev_dbg(xsdfec->dev, "Write exceeds SC table length");
		return -EINVAL;
	}
	/* Number of pages covered by len words of 4 bytes, rounded up */
	n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
	if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
		n += 1;
	if (WARN_ON_ONCE(n > INT_MAX))
		return -EINVAL;
	nr_pages = n;
	/* Pin the user pages; on partial success release what was pinned */
	res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
	if (res < nr_pages) {
		if (res > 0)
			unpin_user_pages(pages, res);
		return -EINVAL;
	}
	/*
	 * Map each pinned page and stream its words to the device until
	 * either len words are written or a page boundary is reached.
	 * NOTE(review): addr[reg] uses the global word index against the
	 * current page's mapping — this appears to assume src_ptr is
	 * page-aligned and, for nr_pages > 1, that successive mappings
	 * are contiguous; verify for multi-page/unaligned buffers.
	 */
	for (i = 0; i < nr_pages; i++) {
		addr = kmap_local_page(pages[i]);
		do {
			xsdfec_regwrite(xsdfec,
					base_addr + ((offset + reg) *
						     XSDFEC_REG_WIDTH_JUMP),
					addr[reg]);
			reg++;
		} while ((reg < len) &&
			 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
		kunmap_local(addr);
		unpin_user_page(pages[i]);
	}
	return 0;
}
/*
 * XSDFEC_ADD_LDPC_CODE_PARAMS ioctl backend: copy an LDPC parameter
 * block from user space and program REG0..REG3 plus the SC, LA and QC
 * shared tables for the given code_id. Rejected with -EIO when the
 * core is Turbo-configured, already started, or write-protected.
 */
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_ldpc_params *ldpc;
	int ret, n;

	/* Copy the (large) parameter block from user space in one go */
	ldpc = memdup_user(arg, sizeof(*ldpc));
	if (IS_ERR(ldpc))
		return PTR_ERR(ldpc);

	/* LDPC codes cannot be loaded on a Turbo-configured core */
	if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
		ret = -EIO;
		goto err_out;
	}

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED) {
		ret = -EIO;
		goto err_out;
	}

	/* Code parameters may not change while write protection is on */
	if (xsdfec->config.code_wr_protect) {
		ret = -EIO;
		goto err_out;
	}

	/* Write Reg 0 */
	ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 1 */
	ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 2 */
	ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
				ldpc->norm_type, ldpc->special_qc,
				ldpc->no_final_parity, ldpc->max_schedule,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 3 */
	ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
				ldpc->qc_off, ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Shared Codes: one SC word packs four layers (round up) */
	n = ldpc->nlayers / 4;
	if (ldpc->nlayers % 4)
		n++;

	ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
				 XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
				 XSDFEC_SC_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
				 ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
				 XSDFEC_LA_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
				 ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
				 XSDFEC_QC_TABLE_DEPTH);
err_out:
	kfree(ldpc);
	return ret;
}
  626. static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
  627. {
  628. bool order_invalid;
  629. enum xsdfec_order order;
  630. int err;
  631. err = get_user(order, (enum xsdfec_order __user *)arg);
  632. if (err)
  633. return -EFAULT;
  634. order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
  635. (order != XSDFEC_OUT_OF_ORDER);
  636. if (order_invalid)
  637. return -EINVAL;
  638. /* Verify Device has not started */
  639. if (xsdfec->state == XSDFEC_STARTED)
  640. return -EIO;
  641. xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
  642. xsdfec->config.order = order;
  643. return 0;
  644. }
  645. static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
  646. {
  647. bool bypass;
  648. int err;
  649. err = get_user(bypass, arg);
  650. if (err)
  651. return -EFAULT;
  652. /* Verify Device has not started */
  653. if (xsdfec->state == XSDFEC_STARTED)
  654. return -EIO;
  655. if (bypass)
  656. xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
  657. else
  658. xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
  659. xsdfec->config.bypass = bypass;
  660. return 0;
  661. }
  662. static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
  663. {
  664. u32 reg_value;
  665. bool is_active;
  666. int err;
  667. reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
  668. /* using a double ! operator instead of casting */
  669. is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
  670. err = put_user(is_active, arg);
  671. if (err)
  672. return -EFAULT;
  673. return err;
  674. }
  675. static u32
  676. xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
  677. {
  678. u32 axis_width_field = 0;
  679. switch (axis_width_cfg) {
  680. case XSDFEC_1x128b:
  681. axis_width_field = 0;
  682. break;
  683. case XSDFEC_2x128b:
  684. axis_width_field = 1;
  685. break;
  686. case XSDFEC_4x128b:
  687. axis_width_field = 2;
  688. break;
  689. }
  690. return axis_width_field;
  691. }
  692. static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
  693. axis_word_inc_cfg)
  694. {
  695. u32 axis_words_field = 0;
  696. if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
  697. axis_word_inc_cfg == XSDFEC_IN_BLOCK)
  698. axis_words_field = 0;
  699. else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
  700. axis_words_field = 1;
  701. return axis_words_field;
  702. }
  703. static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
  704. {
  705. u32 reg_value;
  706. u32 dout_words_field;
  707. u32 dout_width_field;
  708. u32 din_words_field;
  709. u32 din_width_field;
  710. struct xsdfec_config *config = &xsdfec->config;
  711. /* translate config info to register values */
  712. dout_words_field =
  713. xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
  714. dout_width_field =
  715. xsdfec_translate_axis_width_cfg_val(config->dout_width);
  716. din_words_field =
  717. xsdfec_translate_axis_words_cfg_val(config->din_word_include);
  718. din_width_field =
  719. xsdfec_translate_axis_width_cfg_val(config->din_width);
  720. reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
  721. reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
  722. reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
  723. reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
  724. xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
  725. return 0;
  726. }
  727. static int xsdfec_start(struct xsdfec_dev *xsdfec)
  728. {
  729. u32 regread;
  730. regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
  731. regread &= 0x1;
  732. if (regread != xsdfec->config.code) {
  733. dev_dbg(xsdfec->dev,
  734. "%s SDFEC HW code does not match driver code, reg %d, code %d",
  735. __func__, regread, xsdfec->config.code);
  736. return -EINVAL;
  737. }
  738. /* Set AXIS enable */
  739. xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
  740. XSDFEC_AXIS_ENABLE_MASK);
  741. /* Done */
  742. xsdfec->state = XSDFEC_STARTED;
  743. return 0;
  744. }
  745. static int xsdfec_stop(struct xsdfec_dev *xsdfec)
  746. {
  747. u32 regread;
  748. if (xsdfec->state != XSDFEC_STARTED)
  749. dev_dbg(xsdfec->dev, "Device not started correctly");
  750. /* Disable AXIS_ENABLE Input interfaces only */
  751. regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
  752. regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
  753. xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
  754. /* Stop */
  755. xsdfec->state = XSDFEC_STOPPED;
  756. return 0;
  757. }
  758. static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
  759. {
  760. spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
  761. xsdfec->isr_err_count = 0;
  762. xsdfec->uecc_count = 0;
  763. xsdfec->cecc_count = 0;
  764. spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
  765. return 0;
  766. }
  767. static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
  768. {
  769. int err;
  770. struct xsdfec_stats user_stats;
  771. spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
  772. user_stats.isr_err_count = xsdfec->isr_err_count;
  773. user_stats.cecc_count = xsdfec->cecc_count;
  774. user_stats.uecc_count = xsdfec->uecc_count;
  775. xsdfec->stats_updated = false;
  776. spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
  777. err = copy_to_user(arg, &user_stats, sizeof(user_stats));
  778. if (err)
  779. err = -EFAULT;
  780. return err;
  781. }
/*
 * Rewrite the code-type and AXIS registers from the driver's cached
 * configuration, then refresh the cached config from the hardware.
 * Always returns 0.
 */
static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
	/* Ensure registers are aligned with core configuration */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
	xsdfec_cfg_axi_streams(xsdfec);
	update_config_from_hw(xsdfec);
	return 0;
}
/*
 * ioctl dispatcher for the SD-FEC character device.
 *
 * @fptr: open file; private_data is the miscdevice embedded in xsdfec_dev
 * @cmd:  XSDFEC_* ioctl command
 * @data: user-space argument pointer (command-specific)
 *
 * When the device is in the NEEDS_RESET state only the recovery and
 * status/statistics commands are allowed; everything else gets -EPERM.
 * Unknown commands return -ENOTTY per convention.
 */
static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
			     unsigned long data)
{
	struct xsdfec_dev *xsdfec;
	void __user *arg = (void __user *)data;
	int rval;

	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);

	/* In failed state allow only reset and get status IOCTLs */
	if (xsdfec->state == XSDFEC_NEEDS_RESET &&
	    (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
	     cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
		return -EPERM;
	}

	switch (cmd) {
	case XSDFEC_START_DEV:
		rval = xsdfec_start(xsdfec);
		break;
	case XSDFEC_STOP_DEV:
		rval = xsdfec_stop(xsdfec);
		break;
	case XSDFEC_CLEAR_STATS:
		rval = xsdfec_clear_stats(xsdfec);
		break;
	case XSDFEC_GET_STATS:
		rval = xsdfec_get_stats(xsdfec, arg);
		break;
	case XSDFEC_GET_STATUS:
		rval = xsdfec_get_status(xsdfec, arg);
		break;
	case XSDFEC_GET_CONFIG:
		rval = xsdfec_get_config(xsdfec, arg);
		break;
	case XSDFEC_SET_DEFAULT_CONFIG:
		rval = xsdfec_set_default_config(xsdfec);
		break;
	case XSDFEC_SET_IRQ:
		rval = xsdfec_set_irq(xsdfec, arg);
		break;
	case XSDFEC_SET_TURBO:
		rval = xsdfec_set_turbo(xsdfec, arg);
		break;
	case XSDFEC_GET_TURBO:
		rval = xsdfec_get_turbo(xsdfec, arg);
		break;
	case XSDFEC_ADD_LDPC_CODE_PARAMS:
		rval = xsdfec_add_ldpc(xsdfec, arg);
		break;
	case XSDFEC_SET_ORDER:
		rval = xsdfec_set_order(xsdfec, arg);
		break;
	case XSDFEC_SET_BYPASS:
		rval = xsdfec_set_bypass(xsdfec, arg);
		break;
	case XSDFEC_IS_ACTIVE:
		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
		break;
	default:
		/* Unknown ioctl for this device */
		rval = -ENOTTY;
		break;
	}
	return rval;
}
  852. static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
  853. {
  854. __poll_t mask = 0;
  855. struct xsdfec_dev *xsdfec;
  856. xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
  857. poll_wait(file, &xsdfec->waitq, wait);
  858. /* XSDFEC ISR detected an error */
  859. spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
  860. if (xsdfec->state_updated)
  861. mask |= EPOLLIN | EPOLLPRI;
  862. if (xsdfec->stats_updated)
  863. mask |= EPOLLIN | EPOLLRDNORM;
  864. spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
  865. return mask;
  866. }
/* Character-device operations exposed through the misc device */
static const struct file_operations xsdfec_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = xsdfec_dev_ioctl,
	.poll = xsdfec_poll,
	.compat_ioctl = compat_ptr_ioctl,
};
  873. static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
  874. {
  875. struct device *dev = xsdfec->dev;
  876. struct device_node *node = dev->of_node;
  877. int rval;
  878. const char *fec_code;
  879. u32 din_width;
  880. u32 din_word_include;
  881. u32 dout_width;
  882. u32 dout_word_include;
  883. rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
  884. if (rval < 0)
  885. return rval;
  886. if (!strcasecmp(fec_code, "ldpc"))
  887. xsdfec->config.code = XSDFEC_LDPC_CODE;
  888. else if (!strcasecmp(fec_code, "turbo"))
  889. xsdfec->config.code = XSDFEC_TURBO_CODE;
  890. else
  891. return -EINVAL;
  892. rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
  893. &din_word_include);
  894. if (rval < 0)
  895. return rval;
  896. if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
  897. xsdfec->config.din_word_include = din_word_include;
  898. else
  899. return -EINVAL;
  900. rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
  901. if (rval < 0)
  902. return rval;
  903. switch (din_width) {
  904. /* Fall through and set for valid values */
  905. case XSDFEC_1x128b:
  906. case XSDFEC_2x128b:
  907. case XSDFEC_4x128b:
  908. xsdfec->config.din_width = din_width;
  909. break;
  910. default:
  911. return -EINVAL;
  912. }
  913. rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
  914. &dout_word_include);
  915. if (rval < 0)
  916. return rval;
  917. if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
  918. xsdfec->config.dout_word_include = dout_word_include;
  919. else
  920. return -EINVAL;
  921. rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
  922. if (rval < 0)
  923. return rval;
  924. switch (dout_width) {
  925. /* Fall through and set for valid values */
  926. case XSDFEC_1x128b:
  927. case XSDFEC_2x128b:
  928. case XSDFEC_4x128b:
  929. xsdfec->config.dout_width = dout_width;
  930. break;
  931. default:
  932. return -EINVAL;
  933. }
  934. /* Write LDPC to CODE Register */
  935. xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
  936. xsdfec_cfg_axi_streams(xsdfec);
  937. return 0;
  938. }
/*
 * Threaded IRQ handler for SD-FEC error interrupts.
 *
 * Masks both interrupt sources, reads and acknowledges the ECC and
 * generic ISR registers, folds the new error counts into the driver
 * statistics under error_data_lock, escalates the device state on
 * uncorrectable/ISR errors, and wakes poll() waiters when anything
 * changed. Returns IRQ_HANDLED when an update occurred, IRQ_NONE
 * otherwise.
 */
static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
	struct xsdfec_dev *xsdfec = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	u32 ecc_err;
	u32 isr_err;
	u32 uecc_count;
	u32 cecc_count;
	u32 isr_err_count;
	u32 aecc_count;
	u32 tmp;

	WARN_ON(xsdfec->irq != irq);

	/* Mask Interrupts */
	xsdfec_isr_enable(xsdfec, false);
	xsdfec_ecc_isr_enable(xsdfec, false);
	/* Read ISR */
	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
	/* Clear the interrupts (write-1-to-clear the bits just read) */
	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
	/* Count uncorrectable 2-bit errors */
	uecc_count = hweight32(tmp);
	/* Count all ECC errors */
	aecc_count = hweight32(ecc_err);
	/*
	 * Number of correctable 1-bit ECC error.
	 * NOTE(review): the 2x factor assumes each multi-bit error sets two
	 * bits in the ECC ISR register — confirm against the SD-FEC spec.
	 */
	cecc_count = aecc_count - 2 * uecc_count;
	/* Count ISR errors */
	isr_err_count = hweight32(isr_err);
	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
		uecc_count, aecc_count, cecc_count, isr_err_count);
	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
		xsdfec->cecc_count, xsdfec->isr_err_count);
	/* Fold new counts into the running totals under the stats lock */
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	/* Add new errors to a 2-bits counter */
	if (uecc_count)
		xsdfec->uecc_count += uecc_count;
	/* Add new errors to a 1-bits counter */
	if (cecc_count)
		xsdfec->cecc_count += cecc_count;
	/* Add new errors to a ISR counter */
	if (isr_err_count)
		xsdfec->isr_err_count += isr_err_count;
	/* Update state/stats flag */
	if (uecc_count) {
		/* Uncorrectable errors may require a reset or PL reconfigure */
		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_NEEDS_RESET;
		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_PL_RECONFIGURE;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}
	if (cecc_count)
		xsdfec->stats_updated = true;
	if (isr_err_count) {
		xsdfec->state = XSDFEC_NEEDS_RESET;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	/* NOTE(review): flags are re-read outside the lock below; benign
	 * for debug/wakeup but not a consistent snapshot.
	 */
	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
		xsdfec->stats_updated);
	/* Enable another polling */
	if (xsdfec->state_updated || xsdfec->stats_updated)
		wake_up_interruptible(&xsdfec->waitq);
	else
		ret = IRQ_NONE;
	/* Unmask Interrupts */
	xsdfec_isr_enable(xsdfec, true);
	xsdfec_ecc_isr_enable(xsdfec, true);
	return ret;
}
  1012. static int xsdfec_clk_init(struct platform_device *pdev,
  1013. struct xsdfec_clks *clks)
  1014. {
  1015. int err;
  1016. clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
  1017. if (IS_ERR(clks->core_clk)) {
  1018. dev_err(&pdev->dev, "failed to get core_clk");
  1019. return PTR_ERR(clks->core_clk);
  1020. }
  1021. clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
  1022. if (IS_ERR(clks->axi_clk)) {
  1023. dev_err(&pdev->dev, "failed to get axi_clk");
  1024. return PTR_ERR(clks->axi_clk);
  1025. }
  1026. clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
  1027. if (IS_ERR(clks->din_words_clk)) {
  1028. if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
  1029. err = PTR_ERR(clks->din_words_clk);
  1030. return err;
  1031. }
  1032. clks->din_words_clk = NULL;
  1033. }
  1034. clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
  1035. if (IS_ERR(clks->din_clk)) {
  1036. if (PTR_ERR(clks->din_clk) != -ENOENT) {
  1037. err = PTR_ERR(clks->din_clk);
  1038. return err;
  1039. }
  1040. clks->din_clk = NULL;
  1041. }
  1042. clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
  1043. if (IS_ERR(clks->dout_clk)) {
  1044. if (PTR_ERR(clks->dout_clk) != -ENOENT) {
  1045. err = PTR_ERR(clks->dout_clk);
  1046. return err;
  1047. }
  1048. clks->dout_clk = NULL;
  1049. }
  1050. clks->dout_words_clk =
  1051. devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
  1052. if (IS_ERR(clks->dout_words_clk)) {
  1053. if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
  1054. err = PTR_ERR(clks->dout_words_clk);
  1055. return err;
  1056. }
  1057. clks->dout_words_clk = NULL;
  1058. }
  1059. clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
  1060. if (IS_ERR(clks->ctrl_clk)) {
  1061. if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
  1062. err = PTR_ERR(clks->ctrl_clk);
  1063. return err;
  1064. }
  1065. clks->ctrl_clk = NULL;
  1066. }
  1067. clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
  1068. if (IS_ERR(clks->status_clk)) {
  1069. if (PTR_ERR(clks->status_clk) != -ENOENT) {
  1070. err = PTR_ERR(clks->status_clk);
  1071. return err;
  1072. }
  1073. clks->status_clk = NULL;
  1074. }
  1075. err = clk_prepare_enable(clks->core_clk);
  1076. if (err) {
  1077. dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
  1078. return err;
  1079. }
  1080. err = clk_prepare_enable(clks->axi_clk);
  1081. if (err) {
  1082. dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
  1083. goto err_disable_core_clk;
  1084. }
  1085. err = clk_prepare_enable(clks->din_clk);
  1086. if (err) {
  1087. dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
  1088. goto err_disable_axi_clk;
  1089. }
  1090. err = clk_prepare_enable(clks->din_words_clk);
  1091. if (err) {
  1092. dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
  1093. goto err_disable_din_clk;
  1094. }
  1095. err = clk_prepare_enable(clks->dout_clk);
  1096. if (err) {
  1097. dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
  1098. goto err_disable_din_words_clk;
  1099. }
  1100. err = clk_prepare_enable(clks->dout_words_clk);
  1101. if (err) {
  1102. dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
  1103. err);
  1104. goto err_disable_dout_clk;
  1105. }
  1106. err = clk_prepare_enable(clks->ctrl_clk);
  1107. if (err) {
  1108. dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
  1109. goto err_disable_dout_words_clk;
  1110. }
  1111. err = clk_prepare_enable(clks->status_clk);
  1112. if (err) {
  1113. dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
  1114. goto err_disable_ctrl_clk;
  1115. }
  1116. return err;
  1117. err_disable_ctrl_clk:
  1118. clk_disable_unprepare(clks->ctrl_clk);
  1119. err_disable_dout_words_clk:
  1120. clk_disable_unprepare(clks->dout_words_clk);
  1121. err_disable_dout_clk:
  1122. clk_disable_unprepare(clks->dout_clk);
  1123. err_disable_din_words_clk:
  1124. clk_disable_unprepare(clks->din_words_clk);
  1125. err_disable_din_clk:
  1126. clk_disable_unprepare(clks->din_clk);
  1127. err_disable_axi_clk:
  1128. clk_disable_unprepare(clks->axi_clk);
  1129. err_disable_core_clk:
  1130. clk_disable_unprepare(clks->core_clk);
  1131. return err;
  1132. }
/*
 * Disable and unprepare every SD-FEC clock. Clocks that were absent
 * (left NULL by xsdfec_clk_init()) are no-ops for
 * clk_disable_unprepare().
 */
static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
{
	clk_disable_unprepare(clks->status_clk);
	clk_disable_unprepare(clks->ctrl_clk);
	clk_disable_unprepare(clks->dout_words_clk);
	clk_disable_unprepare(clks->dout_clk);
	clk_disable_unprepare(clks->din_words_clk);
	clk_disable_unprepare(clks->din_clk);
	clk_disable_unprepare(clks->core_clk);
	clk_disable_unprepare(clks->axi_clk);
}
  1144. static int xsdfec_probe(struct platform_device *pdev)
  1145. {
  1146. struct xsdfec_dev *xsdfec;
  1147. struct device *dev;
  1148. int err;
  1149. bool irq_enabled = true;
  1150. xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
  1151. if (!xsdfec)
  1152. return -ENOMEM;
  1153. xsdfec->dev = &pdev->dev;
  1154. spin_lock_init(&xsdfec->error_data_lock);
  1155. err = xsdfec_clk_init(pdev, &xsdfec->clks);
  1156. if (err)
  1157. return err;
  1158. dev = xsdfec->dev;
  1159. xsdfec->regs = devm_platform_ioremap_resource(pdev, 0);
  1160. if (IS_ERR(xsdfec->regs)) {
  1161. err = PTR_ERR(xsdfec->regs);
  1162. goto err_xsdfec_dev;
  1163. }
  1164. xsdfec->irq = platform_get_irq(pdev, 0);
  1165. if (xsdfec->irq < 0) {
  1166. dev_dbg(dev, "platform_get_irq failed");
  1167. irq_enabled = false;
  1168. }
  1169. err = xsdfec_parse_of(xsdfec);
  1170. if (err < 0)
  1171. goto err_xsdfec_dev;
  1172. update_config_from_hw(xsdfec);
  1173. /* Save driver private data */
  1174. platform_set_drvdata(pdev, xsdfec);
  1175. if (irq_enabled) {
  1176. init_waitqueue_head(&xsdfec->waitq);
  1177. /* Register IRQ thread */
  1178. err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
  1179. xsdfec_irq_thread, IRQF_ONESHOT,
  1180. "xilinx-sdfec16", xsdfec);
  1181. if (err < 0) {
  1182. dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
  1183. goto err_xsdfec_dev;
  1184. }
  1185. }
  1186. err = ida_alloc(&dev_nrs, GFP_KERNEL);
  1187. if (err < 0)
  1188. goto err_xsdfec_dev;
  1189. xsdfec->dev_id = err;
  1190. snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
  1191. xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
  1192. xsdfec->miscdev.name = xsdfec->dev_name;
  1193. xsdfec->miscdev.fops = &xsdfec_fops;
  1194. xsdfec->miscdev.parent = dev;
  1195. err = misc_register(&xsdfec->miscdev);
  1196. if (err) {
  1197. dev_err(dev, "error:%d. Unable to register device", err);
  1198. goto err_xsdfec_ida;
  1199. }
  1200. return 0;
  1201. err_xsdfec_ida:
  1202. ida_free(&dev_nrs, xsdfec->dev_id);
  1203. err_xsdfec_dev:
  1204. xsdfec_disable_all_clks(&xsdfec->clks);
  1205. return err;
  1206. }
/*
 * Tear down a device created by xsdfec_probe(): deregister the misc
 * device, release the instance number, and gate all clocks.
 */
static void xsdfec_remove(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;
	xsdfec = platform_get_drvdata(pdev);
	misc_deregister(&xsdfec->miscdev);
	ida_free(&dev_nrs, xsdfec->dev_id);
	xsdfec_disable_all_clks(&xsdfec->clks);
}
/* Device-tree compatible strings handled by this driver */
static const struct of_device_id xsdfec_of_match[] = {
	{
		.compatible = "xlnx,sd-fec-1.1",
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);
/* Platform driver registration and module metadata */
static struct platform_driver xsdfec_driver = {
	.driver = {
		.name = "xilinx-sdfec",
		.of_match_table = xsdfec_of_match,
	},
	.probe = xsdfec_probe,
	.remove_new = xsdfec_remove,
};
module_platform_driver(xsdfec_driver);
MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");