xilinx_can.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641
  1. /* Xilinx CAN device driver
  2. *
  3. * Copyright (C) 2012 - 2014 Xilinx, Inc.
  4. * Copyright (C) 2009 PetaLogix. All rights reserved.
  5. * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
  6. *
  7. * Description:
  8. * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
  9. * This program is free software: you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation, either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. */
  19. #include <linux/clk.h>
  20. #include <linux/errno.h>
  21. #include <linux/init.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/io.h>
  24. #include <linux/kernel.h>
  25. #include <linux/module.h>
  26. #include <linux/netdevice.h>
  27. #include <linux/of.h>
  28. #include <linux/of_device.h>
  29. #include <linux/platform_device.h>
  30. #include <linux/skbuff.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/string.h>
  33. #include <linux/types.h>
  34. #include <linux/can/dev.h>
  35. #include <linux/can/error.h>
  36. #include <linux/can/led.h>
  37. #include <linux/pm_runtime.h>
  38. #define DRIVER_NAME "xilinx_can"
  39. /* CAN registers set */
/* Register offsets into the CAN core's register space. Offsets up to
 * XCAN_ICR_OFFSET are common to all cores; the FIFO/mailbox layout differs
 * between classic CAN and CAN FD cores as noted below.
 */
enum xcan_reg {
	XCAN_SRR_OFFSET = 0x00, /* Software reset */
	XCAN_MSR_OFFSET = 0x04, /* Mode select */
	XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET = 0x10, /* Error counter */
	XCAN_ESR_OFFSET = 0x14, /* Error status */
	XCAN_SR_OFFSET = 0x18, /* Status */
	XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
};
/* Register offsets of the individual words within one frame buffer,
 * relative to the buffer's base offset
 */
#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)

/* Size of one TX/RX message buffer on CAN FD cores */
#define XCAN_CANFD_FRAME_SIZE 0x48
/* Base offsets of TX/RX message buffer n on CAN FD cores */
#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
				    XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
				    XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX 0

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
/* CAN FD cores have wider bit-timing fields at different positions */
#define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
#define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN 8
/* Timeout for polled mode/state transitions of the core */
#define XCAN_TIMEOUT (1 * HZ)

/* Device feature flags for xcan_devtype_data.flags */
/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP 0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF 0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS 0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES 0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
/**
 * struct xcan_devtype_data - Device type specific constants
 * @flags:		Bitmask of XCAN_FLAG_* describing optional HW features
 * @bittiming_const:	Bit timing limits of this core
 * @bus_clk_name:	Name of the bus clock
 * @btr_ts2_shift:	Shift of the Time Segment 2 field in the BTR register
 * @btr_sjw_shift:	Shift of the sync jump width field in the BTR register
 */
struct xcan_devtype_data {
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};
/**
 * struct xcan_priv - This definition define CAN driver instance
 * @can:	CAN private data structure.
 * @tx_lock:	Lock for synchronizing TX interrupt handling
 * @tx_head:	Tx CAN packets ready to send on the queue
 * @tx_tail:	Tx CAN packets successfully sent on the queue
 * @tx_max:	Maximum number packets the driver can send
 * @napi:	NAPI structure
 * @read_reg:	For reading data from CAN registers
 * @write_reg:	For writing data to CAN registers
 * @dev:	Device data structure
 * @reg_base:	Ioremapped address to registers
 * @irq_flags:	For request_irq()
 * @bus_clk:	Pointer to struct clk
 * @can_clk:	Pointer to struct clk
 * @devtype:	Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	/* serializes tx_head/tx_tail updates between xmit and TX IRQ */
	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	/* endianness-specific register accessors, chosen at probe time */
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};
/* CAN Bittiming constants as per Xilinx CAN specs (classic CAN cores) */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* Bit timing limits of CAN FD capable cores, which have wider
 * TS1/TS2/SJW fields in the BTR register
 */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
  207. /**
  208. * xcan_write_reg_le - Write a value to the device register little endian
  209. * @priv: Driver private data structure
  210. * @reg: Register offset
  211. * @val: Value to write at the Register offset
  212. *
  213. * Write data to the paricular CAN register
  214. */
  215. static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
  216. u32 val)
  217. {
  218. iowrite32(val, priv->reg_base + reg);
  219. }
  220. /**
  221. * xcan_read_reg_le - Read a value from the device register little endian
  222. * @priv: Driver private data structure
  223. * @reg: Register offset
  224. *
  225. * Read data from the particular CAN register
  226. * Return: value read from the CAN register
  227. */
  228. static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
  229. {
  230. return ioread32(priv->reg_base + reg);
  231. }
  232. /**
  233. * xcan_write_reg_be - Write a value to the device register big endian
  234. * @priv: Driver private data structure
  235. * @reg: Register offset
  236. * @val: Value to write at the Register offset
  237. *
  238. * Write data to the paricular CAN register
  239. */
  240. static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
  241. u32 val)
  242. {
  243. iowrite32be(val, priv->reg_base + reg);
  244. }
  245. /**
  246. * xcan_read_reg_be - Read a value from the device register big endian
  247. * @priv: Driver private data structure
  248. * @reg: Register offset
  249. *
  250. * Read data from the particular CAN register
  251. * Return: value read from the CAN register
  252. */
  253. static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
  254. {
  255. return ioread32be(priv->reg_base + reg);
  256. }
  257. /**
  258. * xcan_rx_int_mask - Get the mask for the receive interrupt
  259. * @priv: Driver private data structure
  260. *
  261. * Return: The receive interrupt mask used by the driver on this HW
  262. */
  263. static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
  264. {
  265. /* RXNEMP is better suited for our use case as it cannot be cleared
  266. * while the FIFO is non-empty, but CAN FD HW does not have it
  267. */
  268. if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
  269. return XCAN_IXR_RXOK_MASK;
  270. else
  271. return XCAN_IXR_RXNEMP_MASK;
  272. }
  273. /**
  274. * set_reset_mode - Resets the CAN device mode
  275. * @ndev: Pointer to net_device structure
  276. *
  277. * This is the driver reset mode routine.The driver
  278. * enters into configuration mode.
  279. *
  280. * Return: 0 on success and failure value on error
  281. */
  282. static int set_reset_mode(struct net_device *ndev)
  283. {
  284. struct xcan_priv *priv = netdev_priv(ndev);
  285. unsigned long timeout;
  286. priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
  287. timeout = jiffies + XCAN_TIMEOUT;
  288. while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
  289. if (time_after(jiffies, timeout)) {
  290. netdev_warn(ndev, "timed out for config mode\n");
  291. return -ETIMEDOUT;
  292. }
  293. usleep_range(500, 10000);
  294. }
  295. /* reset clears FIFOs */
  296. priv->tx_head = 0;
  297. priv->tx_tail = 0;
  298. return 0;
  299. }
  300. /**
  301. * xcan_set_bittiming - CAN set bit timing routine
  302. * @ndev: Pointer to net_device structure
  303. *
  304. * This is the driver set bittiming routine.
  305. * Return: 0 on success and failure value on error
  306. */
  307. static int xcan_set_bittiming(struct net_device *ndev)
  308. {
  309. struct xcan_priv *priv = netdev_priv(ndev);
  310. struct can_bittiming *bt = &priv->can.bittiming;
  311. u32 btr0, btr1;
  312. u32 is_config_mode;
  313. /* Check whether Xilinx CAN is in configuration mode.
  314. * It cannot set bit timing if Xilinx CAN is not in configuration mode.
  315. */
  316. is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
  317. XCAN_SR_CONFIG_MASK;
  318. if (!is_config_mode) {
  319. netdev_alert(ndev,
  320. "BUG! Cannot set bittiming - CAN is not in config mode\n");
  321. return -EPERM;
  322. }
  323. /* Setting Baud Rate prescalar value in BRPR Register */
  324. btr0 = (bt->brp - 1);
  325. /* Setting Time Segment 1 in BTR Register */
  326. btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
  327. /* Setting Time Segment 2 in BTR Register */
  328. btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
  329. /* Setting Synchronous jump width in BTR Register */
  330. btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
  331. priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
  332. priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
  333. netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
  334. priv->read_reg(priv, XCAN_BRPR_OFFSET),
  335. priv->read_reg(priv, XCAN_BTR_OFFSET));
  336. return 0;
  337. }
  338. /**
  339. * xcan_chip_start - This the drivers start routine
  340. * @ndev: Pointer to net_device structure
  341. *
  342. * This is the drivers start routine.
  343. * Based on the State of the CAN device it puts
  344. * the CAN device into a proper mode.
  345. *
  346. * Return: 0 on success and failure value on error
  347. */
  348. static int xcan_chip_start(struct net_device *ndev)
  349. {
  350. struct xcan_priv *priv = netdev_priv(ndev);
  351. u32 reg_msr, reg_sr_mask;
  352. int err;
  353. unsigned long timeout;
  354. u32 ier;
  355. /* Check if it is in reset mode */
  356. err = set_reset_mode(ndev);
  357. if (err < 0)
  358. return err;
  359. err = xcan_set_bittiming(ndev);
  360. if (err < 0)
  361. return err;
  362. /* Enable interrupts */
  363. ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
  364. XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
  365. XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
  366. XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
  367. if (priv->devtype.flags & XCAN_FLAG_RXMNF)
  368. ier |= XCAN_IXR_RXMNF_MASK;
  369. priv->write_reg(priv, XCAN_IER_OFFSET, ier);
  370. /* Check whether it is loopback mode or normal mode */
  371. if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
  372. reg_msr = XCAN_MSR_LBACK_MASK;
  373. reg_sr_mask = XCAN_SR_LBACK_MASK;
  374. } else {
  375. reg_msr = 0x0;
  376. reg_sr_mask = XCAN_SR_NORMAL_MASK;
  377. }
  378. /* enable the first extended filter, if any, as cores with extended
  379. * filtering default to non-receipt if all filters are disabled
  380. */
  381. if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
  382. priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
  383. priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
  384. priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
  385. timeout = jiffies + XCAN_TIMEOUT;
  386. while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
  387. if (time_after(jiffies, timeout)) {
  388. netdev_warn(ndev,
  389. "timed out for correct mode\n");
  390. return -ETIMEDOUT;
  391. }
  392. }
  393. netdev_dbg(ndev, "status:#x%08x\n",
  394. priv->read_reg(priv, XCAN_SR_OFFSET));
  395. priv->can.state = CAN_STATE_ERROR_ACTIVE;
  396. return 0;
  397. }
  398. /**
  399. * xcan_do_set_mode - This sets the mode of the driver
  400. * @ndev: Pointer to net_device structure
  401. * @mode: Tells the mode of the driver
  402. *
  403. * This check the drivers state and calls the
  404. * the corresponding modes to set.
  405. *
  406. * Return: 0 on success and failure value on error
  407. */
  408. static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
  409. {
  410. int ret;
  411. switch (mode) {
  412. case CAN_MODE_START:
  413. ret = xcan_chip_start(ndev);
  414. if (ret < 0) {
  415. netdev_err(ndev, "xcan_chip_start failed!\n");
  416. return ret;
  417. }
  418. netif_wake_queue(ndev);
  419. break;
  420. default:
  421. ret = -EOPNOTSUPP;
  422. break;
  423. }
  424. return ret;
  425. }
/**
 * xcan_write_frame - Write a frame to HW
 * @priv:	Driver private data structure
 * @skb:	sk_buff pointer that contains data to be Txed
 * @frame_offset:	Register offset to write the frame to
 *
 * Converts the socketCAN frame into the Xilinx register layout and writes
 * the ID, DLC and data words to the frame buffer at @frame_offset. The
 * register write order matters: on non-FD cores the last write of the
 * frame (DLC for RTR frames, DW2 otherwise) triggers the transmission.
 */
static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct can_frame *cf = (struct can_frame *)skb->data;

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format: lower 18 bits go to ID2 ... */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		/* ... and the upper 11 bits to ID1 */
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;
		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request: the RTR of a
			 * standard frame sits in the SRR bit position
			 */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;

	/* payload occupies the data word registers in big-endian order */
	if (cf->can_dlc > 0)
		data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
	if (cf->can_dlc > 4)
		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (!(cf->can_id & CAN_RTR_FLAG)) {
		priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
				data[0]);
		/* If the CAN frame is Standard/Extended frame this
		 * write triggers transmission (not on CAN FD)
		 */
		priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
				data[1]);
	}
}
/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Queues the frame into the HW TX FIFO and stops the netif queue when
 * the software FIFO accounting indicates the FIFO is full.
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	/* store the echo skb before the HW write so the TX-done path can
	 * loop it back; slot index wraps around the FIFO depth
	 */
	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);

	/* tx_lock serializes tx_head/tx_tail against the TX interrupt */
	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Writes the frame into the single TX mailbox used by this driver and
 * marks it ready for transmission. Only one frame is in flight at a
 * time, so the netif queue is always stopped after queueing.
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* mailbox still pending a previous transmission? */
	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	can_put_echo_skb(skb, ndev, 0);

	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	xcan_write_frame(priv, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
  528. /**
  529. * xcan_start_xmit - Starts the transmission
  530. * @skb: sk_buff pointer that contains data to be Txed
  531. * @ndev: Pointer to net_device structure
  532. *
  533. * This function is invoked from upper layers to initiate transmission.
  534. *
  535. * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
  536. */
  537. static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  538. {
  539. struct xcan_priv *priv = netdev_priv(ndev);
  540. int ret;
  541. if (can_dropped_invalid_skb(ndev, skb))
  542. return NETDEV_TX_OK;
  543. if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
  544. ret = xcan_start_xmit_mailbox(skb, ndev);
  545. else
  546. ret = xcan_start_xmit_fifo(skb, ndev);
  547. if (ret < 0) {
  548. netdev_err(ndev, "BUG!, TX full when queue awake!\n");
  549. netif_stop_queue(ndev);
  550. return NETDEV_TX_BUSY;
  551. }
  552. return NETDEV_TX_OK;
  553. }
/**
 * xcan_rx - Is called from CAN isr to complete the received
 *		frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
		XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		/* ID1 holds the top 11 bits: shift down by
		 * ID1_SHIFT - (EFF - SFF bits) = 21 - 18 = 3
		 */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			XCAN_IDR_ID1_SHIFT;
		/* for standard frames the RTR bit is in the SRR position */
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
  614. /**
  615. * xcan_current_error_state - Get current error state from HW
  616. * @ndev: Pointer to net_device structure
  617. *
  618. * Checks the current CAN error state from the HW. Note that this
  619. * only checks for ERROR_PASSIVE and ERROR_WARNING.
  620. *
  621. * Return:
  622. * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
  623. * otherwise.
  624. */
  625. static enum can_state xcan_current_error_state(struct net_device *ndev)
  626. {
  627. struct xcan_priv *priv = netdev_priv(ndev);
  628. u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
  629. if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
  630. return CAN_STATE_ERROR_PASSIVE;
  631. else if (status & XCAN_SR_ERRWRN_MASK)
  632. return CAN_STATE_ERROR_WARNING;
  633. else
  634. return CAN_STATE_ERROR_ACTIVE;
  635. }
/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev:	Pointer to net_device structure
 * @new_state:	The new CAN state to be set
 * @cf:		Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	/* attribute the state change to whichever side has the higher
	 * error counter; 0 means "unchanged" for can_change_state()
	 */
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		/* report the raw error counters in the error frame */
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}
  664. /**
  665. * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
  666. * @ndev: Pointer to net_device structure
  667. *
  668. * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
  669. * the performed RX/TX has caused it to drop to a lesser state and set
  670. * the interface state accordingly.
  671. */
  672. static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
  673. {
  674. struct xcan_priv *priv = netdev_priv(ndev);
  675. enum can_state old_state = priv->can.state;
  676. enum can_state new_state;
  677. /* changing error state due to successful frame RX/TX can only
  678. * occur from these states
  679. */
  680. if (old_state != CAN_STATE_ERROR_WARNING &&
  681. old_state != CAN_STATE_ERROR_PASSIVE)
  682. return;
  683. new_state = xcan_current_error_state(ndev);
  684. if (new_state != old_state) {
  685. struct sk_buff *skb;
  686. struct can_frame *cf;
  687. skb = alloc_can_err_skb(ndev, &cf);
  688. xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
  689. if (skb) {
  690. struct net_device_stats *stats = &ndev->stats;
  691. stats->rx_packets++;
  692. stats->rx_bytes += cf->can_dlc;
  693. netif_rx(skb);
  694. }
  695. }
  696. }
/**
 * xcan_err_interrupt - error frame Isr
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 err_status;

	/* error frame delivery is best-effort: if allocation fails we
	 * still update state and statistics, only reporting is skipped
	 */
	skb = alloc_can_err_skb(ndev, &cf);

	/* read and clear the sticky error-status bits */
	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
		}
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
		}
	}

	/* Check for error interrupt; the individual causes are decoded
	 * from the error-status register read above
	 */
	if (isr & XCAN_IXR_ERROR_MASK) {
		if (skb)
			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_ACK;
				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}

		priv->can.can_stats.bus_error++;
	}

	if (skb) {
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_rx(skb);
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}
/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * Checks the sleep/wake-up interrupt bits and moves the
 * device into the corresponding state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv: Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO, or -ENOENT if
 * the RX FIFO is empty.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (!(fsr & XCAN_FSR_FL_MASK))
			return -ENOENT;

		/* the read index in FSR selects the next frame's registers */
		offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}
/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi: napi structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for the rx part. It processes at most @quota
 * frames from the RX FIFO and re-enables the RX interrupts once done.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
		/* a successful RX may have lowered the error state */
		xcan_update_error_state_after_rxtx(ndev);
	}

	if (work_done < quota) {
		/* FIFO drained within quota: leave polling mode and
		 * re-enable the RX interrupts disabled in the ISR
		 */
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= xcan_rx_int_mask(priv);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}

	return work_done;
}
/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev: net_device pointer
 * @isr: Interrupt status register value
 *
 * Completes as many echoed TX frames as the hardware indicates have been
 * sent and wakes the TX queue.
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 * time. This avoids us clearing a TXOK of a second frame
		 * but not noticing that the FIFO is now empty and thus
		 * marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 * stray TXOK as we might process the associated frame
		 * via TXFEMP handling as we read TXFEMP *after* TXOK
		 * clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	/* complete the echo skbs for every frame known to be sent */
	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	can_led_event(ndev, CAN_LED_EVENT_TX);
	/* a successful TX may have lowered the error state */
	xcan_update_error_state_after_rxtx(ndev);
}
/**
 * xcan_interrupt - CAN Isr
 * @irq: irq number
 * @dev_id: device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If no interrupt is pending, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
					XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and Processing it;
	 * RX interrupts are masked here and re-enabled by the NAPI poll
	 * routine once the FIFO is drained
	 */
	if (isr & rx_int_mask) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
  1013. /**
  1014. * xcan_chip_stop - Driver stop routine
  1015. * @ndev: Pointer to net_device structure
  1016. *
  1017. * This is the drivers stop routine. It will disable the
  1018. * interrupts and put the device into configuration mode.
  1019. */
  1020. static void xcan_chip_stop(struct net_device *ndev)
  1021. {
  1022. struct xcan_priv *priv = netdev_priv(ndev);
  1023. /* Disable interrupts and leave the can in configuration mode */
  1024. set_reset_mode(ndev);
  1025. priv->can.state = CAN_STATE_STOPPED;
  1026. }
  1027. /**
  1028. * xcan_open - Driver open routine
  1029. * @ndev: Pointer to net_device structure
  1030. *
  1031. * This is the driver open routine.
  1032. * Return: 0 on success and failure value on error
  1033. */
  1034. static int xcan_open(struct net_device *ndev)
  1035. {
  1036. struct xcan_priv *priv = netdev_priv(ndev);
  1037. int ret;
  1038. ret = pm_runtime_get_sync(priv->dev);
  1039. if (ret < 0) {
  1040. netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
  1041. __func__, ret);
  1042. return ret;
  1043. }
  1044. ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
  1045. ndev->name, ndev);
  1046. if (ret < 0) {
  1047. netdev_err(ndev, "irq allocation for CAN failed\n");
  1048. goto err;
  1049. }
  1050. /* Set chip into reset mode */
  1051. ret = set_reset_mode(ndev);
  1052. if (ret < 0) {
  1053. netdev_err(ndev, "mode resetting failed!\n");
  1054. goto err_irq;
  1055. }
  1056. /* Common open */
  1057. ret = open_candev(ndev);
  1058. if (ret)
  1059. goto err_irq;
  1060. ret = xcan_chip_start(ndev);
  1061. if (ret < 0) {
  1062. netdev_err(ndev, "xcan_chip_start failed!\n");
  1063. goto err_candev;
  1064. }
  1065. can_led_event(ndev, CAN_LED_EVENT_OPEN);
  1066. napi_enable(&priv->napi);
  1067. netif_start_queue(ndev);
  1068. return 0;
  1069. err_candev:
  1070. close_candev(ndev);
  1071. err_irq:
  1072. free_irq(ndev->irq, ndev);
  1073. err:
  1074. pm_runtime_put(priv->dev);
  1075. return ret;
  1076. }
/**
 * xcan_close - Driver close routine
 * @ndev: Pointer to net_device structure
 *
 * Stops the queue and NAPI, shuts the controller down and releases the
 * IRQ and the runtime PM reference taken in xcan_open().
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* teardown mirrors xcan_open() in reverse order */
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}
  1095. /**
  1096. * xcan_get_berr_counter - error counter routine
  1097. * @ndev: Pointer to net_device structure
  1098. * @bec: Pointer to can_berr_counter structure
  1099. *
  1100. * This is the driver error counter routine.
  1101. * Return: 0 on success and failure value on error
  1102. */
  1103. static int xcan_get_berr_counter(const struct net_device *ndev,
  1104. struct can_berr_counter *bec)
  1105. {
  1106. struct xcan_priv *priv = netdev_priv(ndev);
  1107. int ret;
  1108. ret = pm_runtime_get_sync(priv->dev);
  1109. if (ret < 0) {
  1110. netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
  1111. __func__, ret);
  1112. return ret;
  1113. }
  1114. bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
  1115. bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
  1116. XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
  1117. pm_runtime_put(priv->dev);
  1118. return 0;
  1119. }
/* net_device operations for the CAN interface */
static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};
/**
 * xcan_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 *
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	/* only quiesce the interface if it is actually up */
	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}
/**
 * xcan_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 *
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	/* restart the controller only if it was running before suspend */
	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Gates both controller clocks to put the device into low power mode.
 *
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}
  1185. /**
  1186. * xcan_runtime_resume - Runtime resume from suspend
  1187. * @dev: Address of the device structure
  1188. *
  1189. * Resume operation after suspend.
  1190. * Return: 0 on success and failure value on error
  1191. */
  1192. static int __maybe_unused xcan_runtime_resume(struct device *dev)
  1193. {
  1194. struct net_device *ndev = dev_get_drvdata(dev);
  1195. struct xcan_priv *priv = netdev_priv(ndev);
  1196. int ret;
  1197. ret = clk_prepare_enable(priv->bus_clk);
  1198. if (ret) {
  1199. dev_err(dev, "Cannot enable clock.\n");
  1200. return ret;
  1201. }
  1202. ret = clk_prepare_enable(priv->can_clk);
  1203. if (ret) {
  1204. dev_err(dev, "Cannot enable clock.\n");
  1205. clk_disable_unprepare(priv->bus_clk);
  1206. return ret;
  1207. }
  1208. return 0;
  1209. }
/* system sleep and runtime PM callbacks */
static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
/* Zynq SoC integrated CAN controller; bus clock is named "pclk" */
static const struct xcan_devtype_data xcan_zynq_data = {
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

/* AXI CAN soft IP; bus clock is the AXI clock */
static const struct xcan_devtype_data xcan_axi_data = {
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

/* CAN FD core: extended filters, RXMNF reporting, TX mailboxes and a
 * multi-slot RX FIFO with its own read-index handling
 */
static const struct xcan_devtype_data xcan_canfd_data = {
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};
/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
  1244. /**
  1245. * xcan_probe - Platform registration call
  1246. * @pdev: Handle to the platform device structure
  1247. *
  1248. * This function does all the memory allocation and registration for the CAN
  1249. * device.
  1250. *
  1251. * Return: 0 on success and failure value on error
  1252. */
  1253. static int xcan_probe(struct platform_device *pdev)
  1254. {
  1255. struct resource *res; /* IO mem resources */
  1256. struct net_device *ndev;
  1257. struct xcan_priv *priv;
  1258. const struct of_device_id *of_id;
  1259. const struct xcan_devtype_data *devtype = &xcan_axi_data;
  1260. void __iomem *addr;
  1261. int ret;
  1262. int rx_max, tx_max;
  1263. int hw_tx_max, hw_rx_max;
  1264. const char *hw_tx_max_property;
  1265. /* Get the virtual base address for the device */
  1266. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1267. addr = devm_ioremap_resource(&pdev->dev, res);
  1268. if (IS_ERR(addr)) {
  1269. ret = PTR_ERR(addr);
  1270. goto err;
  1271. }
  1272. of_id = of_match_device(xcan_of_match, &pdev->dev);
  1273. if (of_id && of_id->data)
  1274. devtype = of_id->data;
  1275. hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
  1276. "tx-mailbox-count" : "tx-fifo-depth";
  1277. ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
  1278. &hw_tx_max);
  1279. if (ret < 0) {
  1280. dev_err(&pdev->dev, "missing %s property\n",
  1281. hw_tx_max_property);
  1282. goto err;
  1283. }
  1284. ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
  1285. &hw_rx_max);
  1286. if (ret < 0) {
  1287. dev_err(&pdev->dev,
  1288. "missing rx-fifo-depth property (mailbox mode is not supported)\n");
  1289. goto err;
  1290. }
  1291. /* With TX FIFO:
  1292. *
  1293. * There is no way to directly figure out how many frames have been
  1294. * sent when the TXOK interrupt is processed. If TXFEMP
  1295. * is supported, we can have 2 frames in the FIFO and use TXFEMP
  1296. * to determine if 1 or 2 frames have been sent.
  1297. * Theoretically we should be able to use TXFWMEMP to determine up
  1298. * to 3 frames, but it seems that after putting a second frame in the
  1299. * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
  1300. * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
  1301. * sent), which is not a sensible state - possibly TXFWMEMP is not
  1302. * completely synchronized with the rest of the bits?
  1303. *
  1304. * With TX mailboxes:
  1305. *
  1306. * HW sends frames in CAN ID priority order. To preserve FIFO ordering
  1307. * we submit frames one at a time.
  1308. */
  1309. if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
  1310. (devtype->flags & XCAN_FLAG_TXFEMP))
  1311. tx_max = min(hw_tx_max, 2);
  1312. else
  1313. tx_max = 1;
  1314. rx_max = hw_rx_max;
  1315. /* Create a CAN device instance */
  1316. ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
  1317. if (!ndev)
  1318. return -ENOMEM;
  1319. priv = netdev_priv(ndev);
  1320. priv->dev = &pdev->dev;
  1321. priv->can.bittiming_const = devtype->bittiming_const;
  1322. priv->can.do_set_mode = xcan_do_set_mode;
  1323. priv->can.do_get_berr_counter = xcan_get_berr_counter;
  1324. priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
  1325. CAN_CTRLMODE_BERR_REPORTING;
  1326. priv->reg_base = addr;
  1327. priv->tx_max = tx_max;
  1328. priv->devtype = *devtype;
  1329. spin_lock_init(&priv->tx_lock);
  1330. /* Get IRQ for the device */
  1331. ndev->irq = platform_get_irq(pdev, 0);
  1332. ndev->flags |= IFF_ECHO; /* We support local echo */
  1333. platform_set_drvdata(pdev, ndev);
  1334. SET_NETDEV_DEV(ndev, &pdev->dev);
  1335. ndev->netdev_ops = &xcan_netdev_ops;
  1336. /* Getting the CAN can_clk info */
  1337. priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
  1338. if (IS_ERR(priv->can_clk)) {
  1339. dev_err(&pdev->dev, "Device clock not found.\n");
  1340. ret = PTR_ERR(priv->can_clk);
  1341. goto err_free;
  1342. }
  1343. priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
  1344. if (IS_ERR(priv->bus_clk)) {
  1345. dev_err(&pdev->dev, "bus clock not found\n");
  1346. ret = PTR_ERR(priv->bus_clk);
  1347. goto err_free;
  1348. }
  1349. priv->write_reg = xcan_write_reg_le;
  1350. priv->read_reg = xcan_read_reg_le;
  1351. pm_runtime_enable(&pdev->dev);
  1352. ret = pm_runtime_get_sync(&pdev->dev);
  1353. if (ret < 0) {
  1354. netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
  1355. __func__, ret);
  1356. goto err_pmdisable;
  1357. }
  1358. if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
  1359. priv->write_reg = xcan_write_reg_be;
  1360. priv->read_reg = xcan_read_reg_be;
  1361. }
  1362. priv->can.clock.freq = clk_get_rate(priv->can_clk);
  1363. netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
  1364. ret = register_candev(ndev);
  1365. if (ret) {
  1366. dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
  1367. goto err_disableclks;
  1368. }
  1369. devm_can_led_init(ndev);
  1370. pm_runtime_put(&pdev->dev);
  1371. netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
  1372. priv->reg_base, ndev->irq, priv->can.clock.freq,
  1373. hw_tx_max, priv->tx_max);
  1374. return 0;
  1375. err_disableclks:
  1376. pm_runtime_put(priv->dev);
  1377. err_pmdisable:
  1378. pm_runtime_disable(&pdev->dev);
  1379. err_free:
  1380. free_candev(ndev);
  1381. err:
  1382. return ret;
  1383. }
/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 *
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}
/* platform driver glue and module metadata */
static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");