// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

#define TX_BD_NUM	64
#define RX_BD_NUM	128

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16

#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */

#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */

#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
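
/* Hardware buffer descriptor, laid out as the AXI DMA engine expects it:
 * the engine follows next/phys/cntrl/status, the app words are unused by
 * this driver, and sw_id_offset is software-only storage used to stash the
 * skb pointer for an RX buffer. Note this assumes 32-bit virtual addresses,
 * since the pointer is cast to/from u32 below.
 */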
struct nixge_hw_dma_bd {
	u32 next;
	u32 reserved1;
	u32 phys;
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};
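
/* Per-descriptor TX bookkeeping, kept so completion and error paths can
 * undo the right kind of mapping: page fragments and linear skb data need
 * dma_unmap_page() and dma_unmap_single() respectively.
 */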
struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};
struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus; /* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};
static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}
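
/* Poll a register in the control or DMA window until @cond is true, waiting
 * @sleep_us between reads for at most @timeout_us; thin wrappers around
 * readl_poll_timeout().
 */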
#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	int i;

	/* Guard the unmap loop: this is also called from the init error
	 * path, where the RX ring may never have been allocated.
	 */
	if (priv->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			dma_unmap_single(ndev->dev.parent,
					 priv->rx_bd_v[i].phys,
					 NIXGE_MAX_JUMBO_FRAME_SIZE,
					 DMA_FROM_DEVICE);
			dev_kfree_skb((struct sk_buff *)
				      (priv->rx_bd_v[i].sw_id_offset));
		}

		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);
	}

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					    &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kcalloc(ndev->dev.parent,
				    TX_BD_NUM, sizeof(*priv->tx_skb),
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					    &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		priv->tx_bd_v[i].next = priv->tx_bd_p +
					sizeof(*priv->tx_bd_v) *
					((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		priv->rx_bd_v[i].next = priv->rx_bd_p +
					sizeof(*priv->rx_bd_v) *
					((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		priv->rx_bd_v[i].sw_id_offset = (u32)skb;
		priv->rx_bd_v[i].phys =
			dma_map_single(ndev->dev.parent,
				       skb->data,
				       NIXGE_MAX_JUMBO_FRAME_SIZE,
				       DMA_FROM_DEVICE);
		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
			    (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}
static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}
static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}
static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}
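
/* The ring is full if the descriptor num_frag slots ahead of the tail still
 * has status bits set, i.e. nixge_start_xmit_done() has not reclaimed it yet.
 */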
static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}
static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_OK;
	}

	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
		goto drop;

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_p->phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
			goto frag_err;

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_p->phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	dma_unmap_single(priv->ndev->dev.parent,
			 tx_skb->mapping,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
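
/* Receive up to @budget completed frames: pass each filled skb up the stack,
 * then allocate and map a replacement buffer so the descriptor can be
 * recycled in place.
 */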
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK) &&
	       budget > packets) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(cur_p->sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     NIXGE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32)new_skb;

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}
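
/* NAPI poll: when the budget is not exhausted, complete NAPI and re-check the
 * RX status register; any event that raced in is acked and NAPI rescheduled,
 * otherwise RX interrupts are turned back on.
 */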
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = 0;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more, reschedule, but clear */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* if not, turn on RX IRQs again ... */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}
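
/* Interrupt handlers: completion/delay events are acked and handled inline
 * for TX, or handed off to NAPI for RX. On a DMA error both channels have
 * their interrupts masked and recovery is deferred to the error tasklet.
 */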
static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->tx_bd_v[priv->tx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}
static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off IRQs; NAPI will poll from here on */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->rx_bd_v[priv->rx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}
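
/* DMA error tasklet, scheduled from the interrupt handlers above: resets both
 * channels, drops all in-flight buffers, restores the default coalescing
 * settings and restarts the engines on the existing rings.
 */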
static void nixge_dma_err_handler(unsigned long data)
{
	struct nixge_priv *lp = (struct nixge_priv *)data;
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];

		nixge_tx_skb_unmap(lp, tx_skb);

		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			    (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}
static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
		     (unsigned long)priv);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	/* undo napi_enable() so a later open can enable it again */
	napi_disable(&priv->napi);
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}
static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}
static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	    NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}
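
/* The MAC address is programmed into two registers: NIXGE_REG_MAC_MSB takes
 * the first two octets (dev_addr[0] in bits 15:8) and NIXGE_REG_MAC_LSB the
 * remaining four, with dev_addr[2] in the top byte.
 */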
static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2]) << 24 |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}
static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open = nixge_open,
	.ndo_stop = nixge_stop,
	.ndo_start_xmit = nixge_start_xmit,
	.ndo_change_mtu = nixge_change_mtu,
	.ndo_set_mac_address = nixge_net_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};
static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
	/* size against the destination field, not ed->driver */
	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}
static int nixge_ethtools_get_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}
static int nixge_ethtools_set_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ecoalesce->rx_coalesce_usecs ||
	    ecoalesce->rx_coalesce_usecs_irq ||
	    ecoalesce->rx_max_coalesced_frames_irq ||
	    ecoalesce->tx_coalesce_usecs ||
	    ecoalesce->tx_coalesce_usecs_irq ||
	    ecoalesce->tx_max_coalesced_frames_irq ||
	    ecoalesce->stats_block_coalesce_usecs ||
	    ecoalesce->use_adaptive_rx_coalesce ||
	    ecoalesce->use_adaptive_tx_coalesce ||
	    ecoalesce->pkt_rate_low ||
	    ecoalesce->rx_coalesce_usecs_low ||
	    ecoalesce->rx_max_coalesced_frames_low ||
	    ecoalesce->tx_coalesce_usecs_low ||
	    ecoalesce->tx_max_coalesced_frames_low ||
	    ecoalesce->pkt_rate_high ||
	    ecoalesce->rx_coalesce_usecs_high ||
	    ecoalesce->rx_max_coalesced_frames_high ||
	    ecoalesce->tx_coalesce_usecs_high ||
	    ecoalesce->tx_max_coalesced_frames_high ||
	    ecoalesce->rate_sample_interval)
		return -EOPNOTSUPP;

	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}
static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Enable identification LED override */
		ctrl |= NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 2;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}
static const struct ethtool_ops nixge_ethtool_ops = {
	.get_drvinfo = nixge_ethtools_get_drvinfo,
	.get_coalesce = nixge_ethtools_get_coalesce,
	.set_coalesce = nixge_ethtools_set_coalesce,
	.set_phys_id = nixge_ethtools_set_phys_id,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_link = ethtool_op_get_link,
};
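
/* MDIO access: a Clause 22 transfer is a single OP write followed by a kick
 * of NIXGE_REG_MDIO_CTRL. Clause 45 needs two transactions: first latch the
 * 16-bit register address with NIXGE_MDIO_OP_ADDRESS, then issue the actual
 * read or write against the MMD. CTRL is polled until the core clears it.
 */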
static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}
static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		/* Kick off the transaction; every other MDIO transfer in this
		 * file writes CTRL before polling, and without it the poll
		 * would see the already-cleared CTRL register and return
		 * before the write has been issued.
		 */
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	}

	return err;
}
static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read;
	bus->write = nixge_mdio_write;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}
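
/* Try to fetch the MAC address from an nvmem cell named "address". Returns
 * a buffer allocated by nvmem_cell_read() (the caller must kfree() it) or
 * NULL if no such cell is available.
 */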
static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return NULL;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return mac;
}
static int nixge_probe(struct platform_device *pdev)
{
	struct nixge_priv *priv;
	struct net_device *ndev;
	struct resource *dmares;
	const u8 *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (mac_addr && is_valid_ether_addr(mac_addr))
		ether_addr_copy(ndev->dev_addr, mac_addr);
	else
		eth_hw_addr_random(ndev);
	/* free the nvmem buffer in either case, not just the valid one */
	kfree(mac_addr);

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);

	dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		err = PTR_ERR(priv->dma_regs);
		goto free_netdev;
	}
	priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq");
		err = priv->tx_irq;
		goto free_netdev;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq");
		err = priv->rx_irq;
		goto free_netdev;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	err = nixge_mdio_setup(priv, pdev->dev.of_node);
	if (err) {
		netdev_err(ndev, "error registering mdio bus");
		goto free_netdev;
	}

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if ((int)priv->phy_mode < 0) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "could not find \"phy-handle\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto unregister_mdio;
	}

	return 0;

unregister_mdio:
	mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}
static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}
/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

static struct platform_driver nixge_driver = {
	.probe = nixge_probe,
	.remove = nixge_remove,
	.driver = {
		.name = "nixge",
		.of_match_table = of_match_ptr(nixge_dt_ids),
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");