/*
 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
 *
 * Mostly rewritten, based on driver from Sigma Designs.  Original
 * copyright notice below.
 *
 *
 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
 *
 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/dma-mapping.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/barrier.h>

#include "nb8800.h"

static void nb8800_tx_done(struct net_device *dev);
static int nb8800_dma_stop(struct net_device *dev);

static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
{
	return readb_relaxed(priv->base + reg);
}

static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
{
	return readl_relaxed(priv->base + reg);
}

static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
{
	writeb_relaxed(val, priv->base + reg);
}

static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
{
	writew_relaxed(val, priv->base + reg);
}

static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
{
	writel_relaxed(val, priv->base + reg);
}

static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readb(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writeb(priv, reg, new);
}

static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readl(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writel(priv, reg, new);
}

static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
			       bool set)
{
	nb8800_maskb(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, bits);
}

static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, 0);
}

static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
			       bool set)
{
	nb8800_maskl(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, bits);
}

static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, 0);
}

static int nb8800_mdio_wait(struct mii_bus *bus)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;

	return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
					 val, !(val & MDIO_CMD_GO), 1, 1000);
}

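/* Issue an MDIO command: wait for any previous command to finish, load
 * the command word, then set the GO bit to start it and wait for
 * completion.
 */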
static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
{
	struct nb8800_priv *priv = bus->priv;
	int err;

	err = nb8800_mdio_wait(bus);
	if (err)
		return err;

	nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
	udelay(10);
	nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);

	return nb8800_mdio_wait(bus);
}

static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;
	int err;

	err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
	if (err)
		return err;

	val = nb8800_readl(priv, NB8800_MDIO_STS);
	if (val & MDIO_STS_ERR)
		return 0xffff;

	return val & 0xffff;
}

static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
		  MDIO_CMD_DATA(val) | MDIO_CMD_WR;

	return nb8800_mdio_cmd(bus, cmd);
}

static void nb8800_mac_tx(struct net_device *dev, bool enable)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
		cpu_relax();

	nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
}

static void nb8800_mac_rx(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
}

static void nb8800_mac_af(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
}

static void nb8800_start_rx(struct net_device *dev)
{
	nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
}

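/* Allocate a fresh page-fragment receive buffer for descriptor i and
 * map it for DMA.  The NAPI fragment allocator is used when called
 * from softirq context.
 */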
static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
	int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
	dma_addr_t dma_addr;
	struct page *page;
	unsigned long offset;
	void *data;

	data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
	if (!data)
		return -ENOMEM;

	page = virt_to_head_page(data);
	offset = data - page_address(page);

	dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		skb_free_frag(data);
		return -ENOMEM;
	}

	rxb->page = page;
	rxb->offset = offset;
	rxd->desc.s_addr = dma_addr;

	return 0;
}

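/* Pass a received frame up the stack.  Short frames are copied whole
 * into a new skb so the DMA buffer can be reused; larger frames have
 * their first RX_COPYHDR bytes copied and the rest attached as a page
 * fragment, with a replacement buffer allocated for the descriptor.
 */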
static void nb8800_receive(struct net_device *dev, unsigned int i,
			   unsigned int len)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct page *page = priv->rx_bufs[i].page;
	int offset = priv->rx_bufs[i].offset;
	void *data = page_address(page) + offset;
	dma_addr_t dma = rxd->desc.s_addr;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = len <= RX_COPYBREAK ? len : RX_COPYHDR;

	skb = napi_alloc_skb(&priv->napi, size);
	if (!skb) {
		netdev_err(dev, "rx skb allocation failed\n");
		dev->stats.rx_dropped++;
		return;
	}

	if (len <= RX_COPYBREAK) {
		dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
		skb_put_data(skb, data, len);
		dma_sync_single_for_device(&dev->dev, dma, len,
					   DMA_FROM_DEVICE);
	} else {
		err = nb8800_alloc_rx(dev, i, true);
		if (err) {
			netdev_err(dev, "rx buffer allocation failed\n");
			dev->stats.rx_dropped++;
			dev_kfree_skb(skb);
			return;
		}

		dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
		skb_put_data(skb, data, RX_COPYHDR);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset + RX_COPYHDR, len - RX_COPYHDR,
				RX_BUF_SIZE);
	}

	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(&priv->napi, skb);
}

static void nb8800_rx_error(struct net_device *dev, u32 report)
{
	if (report & RX_LENGTH_ERR)
		dev->stats.rx_length_errors++;

	if (report & RX_FCS_ERR)
		dev->stats.rx_crc_errors++;

	if (report & RX_FIFO_OVERRUN)
		dev->stats.rx_fifo_errors++;

	if (report & RX_ALIGNMENT_ERROR)
		dev->stats.rx_frame_errors++;

	dev->stats.rx_errors++;
}

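/* NAPI poll: reclaim finished tx descriptors, then process received
 * frames up to the budget.  The end-of-chain (EOC) marker is moved to
 * the last descriptor handled so the hardware can keep filling the
 * ring, and rx DMA is restarted in case it stopped at the old EOC.
 */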
static int nb8800_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	unsigned int last = priv->rx_eoc;
	unsigned int next;
	int work = 0;

	nb8800_tx_done(dev);

again:
	do {
		unsigned int len;

		next = (last + 1) % RX_DESC_COUNT;

		rxd = &priv->rx_descs[next];
		if (!rxd->report)
			break;

		len = RX_BYTES_TRANSFERRED(rxd->report);

		if (IS_RX_ERROR(rxd->report))
			nb8800_rx_error(dev, rxd->report);
		else
			nb8800_receive(dev, next, len);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

		if (rxd->report & RX_MULTICAST_PKT)
			dev->stats.multicast++;

		rxd->report = 0;
		last = next;
		work++;
	} while (work < budget);

	if (work) {
		priv->rx_descs[last].desc.config |= DESC_EOC;
		wmb();	/* ensure new EOC is written before clearing old */
		priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
		priv->rx_eoc = last;
		nb8800_start_rx(dev);
	}

	if (work < budget) {
		nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

		/* If a packet arrived after we last checked but
		 * before writing RX_ITR, the interrupt will be
		 * delayed, so we retrieve it now.
		 */
		if (priv->rx_descs[next].report)
			goto again;

		napi_complete_done(napi, work);
	}

	return work;
}

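/* Start tx DMA on the next queued descriptor chain, if one is ready
 * and the DMA engine is idle.  The caller must hold tx_lock.
 */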
static void __nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb;
	u32 txc_cr;

	txb = &priv->tx_bufs[priv->tx_queue];
	if (!txb->ready)
		return;

	txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
	if (txc_cr & TCR_EN)
		return;

	nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
	wmb();		/* ensure desc addr is written before starting DMA */

	nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);

	priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
}

static void nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock_irq(&priv->tx_lock);
}

static void nb8800_tx_dma_start_irq(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock(&priv->tx_lock);
}

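/* Queue a frame for transmission.  The frame is split so the main DMA
 * buffer starts on an 8-byte boundary: any unaligned leading bytes are
 * copied into the bounce buffer embedded in the descriptor and sent
 * via a separate buffer descriptor chained in front of the main one.
 * Descriptors are chained until xmit_more is clear, then the chain is
 * marked ready and the DMA engine is kicked.
 */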
static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_desc *txd;
	struct nb8800_tx_buf *txb;
	struct nb8800_dma_desc *desc;
	dma_addr_t dma_addr;
	unsigned int dma_len;
	unsigned int align;
	unsigned int next;

	if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	align = (8 - (uintptr_t)skb->data) & 7;

	dma_len = skb->len - align;
	dma_addr = dma_map_single(&dev->dev, skb->data + align,
				  dma_len, DMA_TO_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		netdev_err(dev, "tx dma mapping error\n");
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		skb->xmit_more = 0;
	}

	next = priv->tx_next;
	txb = &priv->tx_bufs[next];
	txd = &priv->tx_descs[next];
	desc = &txd->desc[0];

	next = (next + 1) % TX_DESC_COUNT;

	if (align) {
		memcpy(txd->buf, skb->data, align);

		desc->s_addr =
			txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
		desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
		desc->config = DESC_BTS(2) | DESC_DS | align;

		desc++;
	}

	desc->s_addr = dma_addr;
	desc->n_addr = priv->tx_bufs[next].dma_desc;
	desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;

	if (!skb->xmit_more)
		desc->config |= DESC_EOC;

	txb->skb = skb;
	txb->dma_addr = dma_addr;
	txb->dma_len = dma_len;

	if (!priv->tx_chain) {
		txb->chain_len = 1;
		priv->tx_chain = txb;
	} else {
		priv->tx_chain->chain_len++;
	}

	netdev_sent_queue(dev, skb->len);

	priv->tx_next = next;

	if (!skb->xmit_more) {
		smp_wmb();
		priv->tx_chain->ready = true;
		priv->tx_chain = NULL;
		nb8800_tx_dma_start(dev);
	}

	return NETDEV_TX_OK;
}

static void nb8800_tx_error(struct net_device *dev, u32 report)
{
	if (report & TX_LATE_COLLISION)
		dev->stats.collisions++;

	if (report & TX_PACKET_DROPPED)
		dev->stats.tx_dropped++;

	if (report & TX_FIFO_UNDERRUN)
		dev->stats.tx_fifo_errors++;

	dev->stats.tx_errors++;
}

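/* Reclaim transmitted descriptors between tx_done and tx_next: unmap
 * the buffers, free or consume the skbs, update statistics, and wake
 * the queue once space is available again.
 */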
static void nb8800_tx_done(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int limit = priv->tx_next;
	unsigned int done = priv->tx_done;
	unsigned int packets = 0;
	unsigned int len = 0;

	while (done != limit) {
		struct nb8800_tx_desc *txd = &priv->tx_descs[done];
		struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
		struct sk_buff *skb;

		if (!txd->report)
			break;

		skb = txb->skb;
		len += skb->len;

		dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
				 DMA_TO_DEVICE);

		if (IS_TX_ERROR(txd->report)) {
			nb8800_tx_error(dev, txd->report);
			kfree_skb(skb);
		} else {
			consume_skb(skb);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
		dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);

		txb->skb = NULL;
		txb->ready = false;
		txd->report = 0;

		done = (done + 1) % TX_DESC_COUNT;
		packets++;
	}

	if (packets) {
		smp_mb__before_atomic();
		atomic_add(packets, &priv->tx_free);
		netdev_completed_queue(dev, packets, len);
		netif_wake_queue(dev);
		priv->tx_done = done;
	}
}

static irqreturn_t nb8800_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct nb8800_priv *priv = netdev_priv(dev);
	irqreturn_t ret = IRQ_NONE;
	u32 val;

	/* tx interrupt */
	val = nb8800_readl(priv, NB8800_TXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_TXC_SR, val);

		if (val & TSR_DI)
			nb8800_tx_dma_start_irq(dev);

		if (val & TSR_TI)
			napi_schedule_irqoff(&priv->napi);

		if (unlikely(val & TSR_DE))
			netdev_err(dev, "TX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & TSR_TO))
			netdev_err(dev, "TX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	/* rx interrupt */
	val = nb8800_readl(priv, NB8800_RXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_RXC_SR, val);

		if (likely(val & (RSR_RI | RSR_DI))) {
			nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
			napi_schedule_irqoff(&priv->napi);
		}

		if (unlikely(val & RSR_DE))
			netdev_err(dev, "RX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & RSR_RO))
			netdev_err(dev, "RX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	return ret;
}

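/* Reprogram the MAC for the negotiated link speed and duplex setting. */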
static void nb8800_mac_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	bool gigabit = priv->speed == SPEED_1000;
	u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
	u32 mac_mode = 0;
	u32 slot_time;
	u32 phy_clk;
	u32 ict;

	if (!priv->duplex)
		mac_mode |= HALF_DUPLEX;

	if (gigabit) {
		if (phy_interface_is_rgmii(dev->phydev))
			mac_mode |= RGMII_MODE;

		mac_mode |= GMAC_MODE;
		phy_clk = 125000000;

		/* Should be 512 but register is only 8 bits */
		slot_time = 255;
	} else {
		phy_clk = 25000000;
		slot_time = 128;
	}

	ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));

	nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
	nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
	nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
}

static void nb8800_pause_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 rxcr;

	if (priv->pause_aneg) {
		if (!phydev || !phydev->link)
			return;

		priv->pause_rx = phydev->pause;
		priv->pause_tx = phydev->pause ^ phydev->asym_pause;
	}

	nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);

	rxcr = nb8800_readl(priv, NB8800_RXC_CR);
	if (!!(rxcr & RCR_FL) == priv->pause_tx)
		return;

	if (netif_running(dev)) {
		napi_disable(&priv->napi);
		netif_tx_lock_bh(dev);
		nb8800_dma_stop(dev);
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
		nb8800_start_rx(dev);
		netif_tx_unlock_bh(dev);
		napi_enable(&priv->napi);
	} else {
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
	}
}

static void nb8800_link_reconfigure(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int change = 0;

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			priv->speed = phydev->speed;
			change = 1;
		}

		if (phydev->duplex != priv->duplex) {
			priv->duplex = phydev->duplex;
			change = 1;
		}

		if (change)
			nb8800_mac_config(dev);

		nb8800_pause_config(dev);
	}

	if (phydev->link != priv->link) {
		priv->link = phydev->link;
		change = 1;
	}

	if (change)
		phy_print_status(phydev);
}

static void nb8800_update_mac_addr(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
}

static int nb8800_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sock = addr;

	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, sock->sa_data);
	nb8800_update_mac_addr(dev);

	return 0;
}

static void nb8800_mc_init(struct net_device *dev, int val)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_MC_INIT, val);
	readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
				  1, 1000);
}

static void nb8800_set_rx_mode(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		nb8800_mac_af(dev, false);
		return;
	}

	nb8800_mac_af(dev, true);
	nb8800_mc_init(dev, 0);

	netdev_for_each_mc_addr(ha, dev) {
		for (i = 0; i < ETH_ALEN; i++)
			nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);

		nb8800_mc_init(dev, 0xff);
	}
}

#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))

static void nb8800_dma_free(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int i;

	if (priv->rx_bufs) {
		for (i = 0; i < RX_DESC_COUNT; i++)
			if (priv->rx_bufs[i].page)
				put_page(priv->rx_bufs[i].page);

		kfree(priv->rx_bufs);
		priv->rx_bufs = NULL;
	}

	if (priv->tx_bufs) {
		for (i = 0; i < TX_DESC_COUNT; i++)
			kfree_skb(priv->tx_bufs[i].skb);

		kfree(priv->tx_bufs);
		priv->tx_bufs = NULL;
	}

	if (priv->rx_descs) {
		dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
				  priv->rx_desc_dma);
		priv->rx_descs = NULL;
	}

	if (priv->tx_descs) {
		dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
				  priv->tx_desc_dma);
		priv->tx_descs = NULL;
	}
}

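/* Re-initialise both descriptor rings: link the rx descriptors into a
 * circular chain with the EOC marker on the last one, point each tx
 * descriptor's report address at its own status word, and reset all
 * ring indices.
 */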
static void nb8800_dma_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	struct nb8800_tx_desc *txd;
	unsigned int i;

	for (i = 0; i < RX_DESC_COUNT; i++) {
		dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);

		rxd = &priv->rx_descs[i];
		rxd->desc.n_addr = rx_dma + sizeof(*rxd);
		rxd->desc.r_addr =
			rx_dma + offsetof(struct nb8800_rx_desc, report);
		rxd->desc.config = priv->rx_dma_config;
		rxd->report = 0;
	}

	rxd->desc.n_addr = priv->rx_desc_dma;
	rxd->desc.config |= DESC_EOC;

	priv->rx_eoc = RX_DESC_COUNT - 1;

	for (i = 0; i < TX_DESC_COUNT; i++) {
		struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
		dma_addr_t r_dma = txb->dma_desc +
			offsetof(struct nb8800_tx_desc, report);

		txd = &priv->tx_descs[i];
		txd->desc[0].r_addr = r_dma;
		txd->desc[1].r_addr = r_dma;
		txd->report = 0;
	}

	priv->tx_next = 0;
	priv->tx_queue = 0;
	priv->tx_done = 0;
	atomic_set(&priv->tx_free, TX_DESC_COUNT);

	nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);

	wmb();		/* ensure all setup is written before starting */
}

static int nb8800_dma_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int n_rx = RX_DESC_COUNT;
	unsigned int n_tx = TX_DESC_COUNT;
	unsigned int i;
	int err;

	priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
					    &priv->rx_desc_dma, GFP_KERNEL);
	if (!priv->rx_descs)
		goto err_out;

	priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
	if (!priv->rx_bufs)
		goto err_out;

	for (i = 0; i < n_rx; i++) {
		err = nb8800_alloc_rx(dev, i, false);
		if (err)
			goto err_out;
	}

	priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
					    &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_descs)
		goto err_out;

	priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
	if (!priv->tx_bufs)
		goto err_out;

	for (i = 0; i < n_tx; i++)
		priv->tx_bufs[i].dma_desc =
			priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);

	nb8800_dma_reset(dev);

	return 0;

err_out:
	nb8800_dma_free(dev);

	return -ENOMEM;
}

static int nb8800_dma_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
	struct nb8800_tx_desc *txd = &priv->tx_descs[0];
	int retry = 5;
	u32 txcr;
	u32 rxcr;
	int err;
	unsigned int i;

	/* wait for tx to finish */
	err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
					!(txcr & TCR_EN) &&
					priv->tx_done == priv->tx_next,
					1000, 1000000);
	if (err)
		return err;

	/* The rx DMA only stops if it reaches the end of chain.
	 * To make this happen, we set the EOC flag on all rx
	 * descriptors, put the device in loopback mode, and send
	 * a few dummy frames.  The interrupt handler will ignore
	 * these since NAPI is disabled and no real frames are in
	 * the tx queue.
	 */

	for (i = 0; i < RX_DESC_COUNT; i++)
		priv->rx_descs[i].desc.config |= DESC_EOC;

	txd->desc[0].s_addr =
		txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
	txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
	memset(txd->buf, 0, sizeof(txd->buf));

	nb8800_mac_af(dev, false);
	nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);

	do {
		nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
		wmb();
		nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);

		err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
						rxcr, !(rxcr & RCR_EN),
						1000, 100000);
	} while (err && --retry);

	nb8800_mac_af(dev, true);
	nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
	nb8800_dma_reset(dev);

	return retry ? 0 : -ETIMEDOUT;
}

static void nb8800_pause_adv(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 adv = 0;

	if (!phydev)
		return;

	if (priv->pause_rx)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (priv->pause_tx)
		adv ^= ADVERTISED_Asym_Pause;

	phydev->supported |= adv;
	phydev->advertising |= adv;
}

static int nb8800_open(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	int err;

	/* clear any pending interrupts */
	nb8800_writel(priv, NB8800_RXC_SR, 0xf);
	nb8800_writel(priv, NB8800_TXC_SR, 0xf);

	err = nb8800_dma_init(dev);
	if (err)
		return err;

	err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
	if (err)
		goto err_free_dma;

	nb8800_mac_rx(dev, true);
	nb8800_mac_tx(dev, true);

	phydev = of_phy_connect(dev, priv->phy_node,
				nb8800_link_reconfigure, 0,
				priv->phy_mode);
	if (!phydev) {
		err = -ENODEV;
		goto err_free_irq;
	}

	nb8800_pause_adv(dev);

	netdev_reset_queue(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	nb8800_start_rx(dev);
	phy_start(phydev);

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err_free_dma:
	nb8800_dma_free(dev);

	return err;
}

static int nb8800_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	phy_stop(phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	nb8800_dma_stop(dev);
	nb8800_mac_rx(dev, false);
	nb8800_mac_tx(dev, false);

	phy_disconnect(phydev);

	free_irq(dev->irq, dev);

	nb8800_dma_free(dev);

	return 0;
}

static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return phy_mii_ioctl(dev->phydev, rq, cmd);
}

static const struct net_device_ops nb8800_netdev_ops = {
	.ndo_open		= nb8800_open,
	.ndo_stop		= nb8800_stop,
	.ndo_start_xmit		= nb8800_xmit,
	.ndo_set_mac_address	= nb8800_set_mac_address,
	.ndo_set_rx_mode	= nb8800_set_rx_mode,
	.ndo_do_ioctl		= nb8800_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
};

static void nb8800_get_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	pp->autoneg = priv->pause_aneg;
	pp->rx_pause = priv->pause_rx;
	pp->tx_pause = priv->pause_tx;
}

static int nb8800_set_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	priv->pause_aneg = pp->autoneg;
	priv->pause_rx = pp->rx_pause;
	priv->pause_tx = pp->tx_pause;

	nb8800_pause_adv(dev);

	if (!priv->pause_aneg)
		nb8800_pause_config(dev);
	else if (phydev)
		phy_start_aneg(phydev);

	return 0;
}

static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
	"rx_bytes_ok",
	"rx_frames_ok",
	"rx_undersize_frames",
	"rx_fragment_frames",
	"rx_64_byte_frames",
	"rx_127_byte_frames",
	"rx_255_byte_frames",
	"rx_511_byte_frames",
	"rx_1023_byte_frames",
	"rx_max_size_frames",
	"rx_oversize_frames",
	"rx_bad_fcs_frames",
	"rx_broadcast_frames",
	"rx_multicast_frames",
	"rx_control_frames",
	"rx_pause_frames",
	"rx_unsup_control_frames",
	"rx_align_error_frames",
	"rx_overrun_frames",
	"rx_jabber_frames",
	"rx_bytes",
	"rx_frames",

	"tx_bytes_ok",
	"tx_frames_ok",
	"tx_64_byte_frames",
	"tx_127_byte_frames",
	"tx_255_byte_frames",
	"tx_511_byte_frames",
	"tx_1023_byte_frames",
	"tx_max_size_frames",
	"tx_oversize_frames",
	"tx_broadcast_frames",
	"tx_multicast_frames",
	"tx_control_frames",
	"tx_pause_frames",
	"tx_underrun_frames",
	"tx_single_collision_frames",
	"tx_multi_collision_frames",
	"tx_deferred_collision_frames",
	"tx_late_collision_frames",
	"tx_excessive_collision_frames",
	"tx_bytes",
	"tx_frames",
	"tx_collisions",
};

#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)

static int nb8800_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return NB8800_NUM_STATS;

	return -EOPNOTSUPP;
}

static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	if (sset == ETH_SS_STATS)
		memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
}

static u32 nb8800_read_stat(struct net_device *dev, int index)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_STAT_INDEX, index);

	return nb8800_readl(priv, NB8800_STAT_DATA);
}

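/* Hardware statistics are read indirectly via the STAT_INDEX/STAT_DATA
 * registers: rx counters live at index i, and the matching tx counters
 * are fetched at the same index with bit 7 set.
 */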
static void nb8800_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *estats, u64 *st)
{
	unsigned int i;
	u32 rx, tx;

	for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
		rx = nb8800_read_stat(dev, i);
		tx = nb8800_read_stat(dev, i | 0x80);
		st[i] = rx;
		st[i + NB8800_NUM_STATS / 2] = tx;
	}
}

static const struct ethtool_ops nb8800_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= nb8800_get_pauseparam,
	.set_pauseparam		= nb8800_set_pauseparam,
	.get_sset_count		= nb8800_get_sset_count,
	.get_strings		= nb8800_get_strings,
	.get_ethtool_stats	= nb8800_get_ethtool_stats,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static int nb8800_hw_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 val;

	val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
	nb8800_writeb(priv, NB8800_TX_CTL1, val);

	/* Collision retry count */
	nb8800_writeb(priv, NB8800_TX_CTL2, 5);

	val = RX_PAD_STRIP | RX_AF_EN;
	nb8800_writeb(priv, NB8800_RX_CTL, val);

	/* Chosen by fair dice roll */
	nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);

	/* TX cycles per deferral period */
	nb8800_writeb(priv, NB8800_TX_SDP, 12);

	/* The following three threshold values have been
	 * experimentally determined for good results.
	 */

	/* RX/TX FIFO threshold for partial empty (64-bit entries) */
	nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);

	/* RX/TX FIFO threshold for partial full (64-bit entries) */
	nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);

	/* Buffer size for transmit (64-bit entries) */
	nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);

	/* Configure tx DMA */
	val = nb8800_readl(priv, NB8800_TXC_CR);
	val &= TCR_LE;		/* keep endian setting */
	val |= TCR_DM;		/* DMA descriptor mode */
	val |= TCR_RS;		/* automatically store tx status */
	val |= TCR_DIE;		/* interrupt on DMA chain completion */
	val |= TCR_TFI(7);	/* interrupt after 7 frames transmitted */
	val |= TCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_TXC_CR, val);

	/* TX complete interrupt after 10 ms or 7 frames (see above) */
	val = clk_get_rate(priv->clk) / 100;
	nb8800_writel(priv, NB8800_TX_ITR, val);

	/* Configure rx DMA */
	val = nb8800_readl(priv, NB8800_RXC_CR);
	val &= RCR_LE;		/* keep endian setting */
	val |= RCR_DM;		/* DMA descriptor mode */
	val |= RCR_RS;		/* automatically store rx status */
	val |= RCR_DIE;		/* interrupt at end of DMA chain */
	val |= RCR_RFI(7);	/* interrupt after 7 frames received */
	val |= RCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_RXC_CR, val);

	/* The rx interrupt can fire before the DMA has completed
	 * unless a small delay is added.  50 us is hopefully enough.
	 */
	priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;

	/* In NAPI poll mode we want to disable interrupts, but the
	 * hardware does not permit this.  Delay 10 ms instead.
	 */
	priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;

	nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

	priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;

	/* Flow control settings */

	/* Pause time of 0.1 ms */
	val = 100000 / 512;
	nb8800_writeb(priv, NB8800_PQ1, val >> 8);
	nb8800_writeb(priv, NB8800_PQ2, val & 0xff);

	/* Auto-negotiate by default */
	priv->pause_aneg = true;
	priv->pause_rx = true;
	priv->pause_tx = true;

	nb8800_mc_init(dev, 0);

	return 0;
}

static int nb8800_tangox_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 pad_mode = PAD_MODE_MII;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		pad_mode = PAD_MODE_MII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		pad_mode = PAD_MODE_RGMII;
		break;

	default:
		dev_err(dev->dev.parent, "unsupported phy mode %s\n",
			phy_modes(priv->phy_mode));
		return -EINVAL;
	}

	nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);

	return 0;
}

static int nb8800_tangox_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int clk_div;

	nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
	usleep_range(1000, 10000);
	nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);

	wmb();		/* ensure reset is cleared before proceeding */

	clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
	nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);

	return 0;
}

static const struct nb8800_ops nb8800_tangox_ops = {
	.init	= nb8800_tangox_init,
	.reset	= nb8800_tangox_reset,
};

static int nb8800_tango4_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int err;

	err = nb8800_tangox_init(dev);
	if (err)
		return err;

	/* On tango4 interrupt on DMA completion per frame works and gives
	 * better performance despite generating more rx interrupts.
	 */

	/* Disable unnecessary interrupt on rx completion */
	nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));

	/* Request interrupt on descriptor DMA completion */
	priv->rx_dma_config |= DESC_ID;

	return 0;
}

static const struct nb8800_ops nb8800_tango4_ops = {
	.init	= nb8800_tango4_init,
	.reset	= nb8800_tangox_reset,
};

static const struct of_device_id nb8800_dt_ids[] = {
	{
		.compatible = "aurora,nb8800",
	},
	{
		.compatible = "sigma,smp8642-ethernet",
		.data = &nb8800_tangox_ops,
	},
	{
		.compatible = "sigma,smp8734-ethernet",
		.data = &nb8800_tango4_ops,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, nb8800_dt_ids);

static int nb8800_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct nb8800_ops *ops = NULL;
	struct nb8800_priv *priv;
	struct resource *res;
	struct net_device *dev;
	struct mii_bus *bus;
	const unsigned char *mac;
	void __iomem *base;
	int irq;
	int ret;

	match = of_match_device(nb8800_dt_ids, &pdev->dev);
	if (match)
		ops = match->data;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "No IRQ\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);
	priv->base = base;

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto err_free_dev;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto err_free_dev;

	spin_lock_init(&priv->tx_lock);

	if (ops && ops->reset) {
		ret = ops->reset(dev);
		if (ret)
			goto err_disable_clk;
	}

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	bus->name = "nb8800-mii";
	bus->read = nb8800_mdio_read;
	bus->write = nb8800_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
		 (unsigned long)res->start);
	bus->priv = priv;

	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_disable_clk;
	}

	if (of_phy_is_fixed_link(pdev->dev.of_node)) {
		ret = of_phy_register_fixed_link(pdev->dev.of_node);
		if (ret < 0) {
			dev_err(&pdev->dev, "bad fixed-link spec\n");
			goto err_free_bus;
		}
		priv->phy_node = of_node_get(pdev->dev.of_node);
	}

	if (!priv->phy_node)
		priv->phy_node = of_parse_phandle(pdev->dev.of_node,
						  "phy-handle", 0);

	if (!priv->phy_node) {
		dev_err(&pdev->dev, "no PHY specified\n");
		ret = -ENODEV;
		goto err_free_bus;
	}

	priv->mii_bus = bus;

	ret = nb8800_hw_init(dev);
	if (ret)
		goto err_deregister_fixed_link;

	if (ops && ops->init) {
		ret = ops->init(dev);
		if (ret)
			goto err_deregister_fixed_link;
	}

	dev->netdev_ops = &nb8800_netdev_ops;
	dev->ethtool_ops = &nb8800_ethtool_ops;
	dev->flags |= IFF_MULTICAST;
	dev->irq = irq;

	mac = of_get_mac_address(pdev->dev.of_node);
	if (mac)
		ether_addr_copy(dev->dev_addr, mac);

	if (!is_valid_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);

	nb8800_update_mac_addr(dev);

	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		netdev_err(dev, "failed to register netdev\n");
		goto err_free_dma;
	}

	netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);

	netdev_info(dev, "MAC address %pM\n", dev->dev_addr);

	return 0;

err_free_dma:
	nb8800_dma_free(dev);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
err_free_bus:
	of_node_put(priv->phy_node);
	mdiobus_unregister(bus);
err_disable_clk:
	clk_disable_unprepare(priv->clk);
err_free_dev:
	free_netdev(dev);

	return ret;
}

static int nb8800_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nb8800_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(priv->phy_node);

	mdiobus_unregister(priv->mii_bus);
	clk_disable_unprepare(priv->clk);

	nb8800_dma_free(ndev);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver nb8800_driver = {
	.driver = {
		.name		= "nb8800",
		.of_match_table	= nb8800_dt_ids,
	},
	.probe	= nb8800_probe,
	.remove	= nb8800_remove,
};

module_platform_driver(nb8800_driver);

MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
MODULE_LICENSE("GPL");