/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
		/* Physical address of the buffer */
		u64 addr:40;
#else
		u64 addr:40;
		u64 code:7;
		u64 tstamp:1;
		u64 len:14;
		u64 reserved_62_63:2;
#endif
	} s;
};

#define MIX_ORING1 0x0
#define MIX_ORING2 0x8
#define MIX_IRING1 0x10
#define MIX_IRING2 0x18
#define MIX_CTL 0x20
#define MIX_IRHWM 0x28
#define MIX_IRCNT 0x30
#define MIX_ORHWM 0x38
#define MIX_ORCNT 0x40
#define MIX_ISR 0x48
#define MIX_INTENA 0x50
#define MIX_REMCNT 0x58
#define MIX_BIST 0x78

#define AGL_GMX_PRT_CFG 0x10
#define AGL_GMX_RX_FRM_CTL 0x18
#define AGL_GMX_RX_FRM_MAX 0x30
#define AGL_GMX_RX_JABBER 0x38
#define AGL_GMX_RX_STATS_CTL 0x50
#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0
#define AGL_GMX_RX_ADR_CTL 0x100
#define AGL_GMX_RX_ADR_CAM_EN 0x108
#define AGL_GMX_RX_ADR_CAM0 0x180
#define AGL_GMX_RX_ADR_CAM1 0x188
#define AGL_GMX_RX_ADR_CAM2 0x190
#define AGL_GMX_RX_ADR_CAM3 0x198
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8
#define AGL_GMX_TX_CLK 0x208
#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
#define AGL_GMX_TX_STAT1 0x288
#define AGL_GMX_TX_STAT2 0x290
#define AGL_GMX_TX_STAT3 0x298
#define AGL_GMX_TX_STAT4 0x2a0
#define AGL_GMX_TX_STAT5 0x2a8
#define AGL_GMX_TX_STAT6 0x2b0
#define AGL_GMX_TX_STAT7 0x2b8
#define AGL_GMX_TX_STAT8 0x2c0
#define AGL_GMX_TX_STAT9 0x2c8

struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	u64 agl_prt_ctl;
	int port;
	int irq;
	bool has_rx_tstamp;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;
	/* RX variables only touched in napi_poll. No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;
	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

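/* The rings are never filled completely; eight entries are always left
 * unused, presumably to keep the hardware producer from catching up with
 * descriptors that software has not yet reclaimed.
 */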
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

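/* Refill the RX descriptor ring: allocate an skb per free slot, DMA-map it,
 * write its descriptor into the ring and ring the MIX_IRING2 doorbell once
 * per buffer so the hardware sees the new entry.
 */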
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding. */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring. */
		p->rx_ring[p->rx_next_fill] = re.d64;
		/* Make sure there is no reorder of filling the ring and ringing
		 * the bell
		 */
		wmb();

		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell. */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

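/* Reclaim completed TX descriptors.  MIX_ORCNT reports how many packets the
 * hardware has finished sending; for each one, unmap the buffer, deliver a
 * hardware TX timestamp if one was requested, free the skb and acknowledge
 * the count back to MIX_ORCNT.  The queue is woken if anything was cleaned.
 */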
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			u64 ns;

			memset(&ts, 0, sizeof(ts));
			/* Read the timestamp */
			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}
		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers. */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}

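/* Process a single RX descriptor.  A RING_ENTRY_CODE_DONE entry is a
 * complete frame and is passed up the stack (with the leading 8-byte
 * hardware timestamp stripped when RX timestamping is enabled).
 * RING_ENTRY_CODE_MORE means the frame was split across several buffers,
 * which can happen right after an MTU increase; the pieces are copied into
 * one skb.  Any other code is an error and the buffer is dropped.  In all
 * cases one packet is acknowledged to the hardware via MIX_IRCNT.
 */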
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;
done:
	/* Tell the hardware we processed a packet. */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {
		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

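/* Standard NAPI poll: consume up to 'budget' packets; when the ring drains
 * before the budget is used up, complete NAPI and re-enable the RX interrupt.
 */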
static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete_done(napi, work_done);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

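/* Program the RX address filter.  The AGL CAM holds up to eight MAC
 * addresses: one for the primary station address and up to seven secondary
 * unicast or multicast addresses.  If more addresses are needed than the CAM
 * can hold, or the interface is promiscuous, the CAM match is bypassed and
 * multicast filtering falls back to accept-all.
 */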
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast. */
		else
			multicast_mode = 0; /* 0 - Use CAM. */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1; /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int r = eth_mac_addr(netdev, addr);

	if (r)
		return r;

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	netdev->mtu = new_mtu;

	/* HW lifts the limit if the frame is VLAN tagged
	 * (+4 bytes per each tag, up to two tags)
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
	/* Set the hardware to truncate packets larger than the MTU. The jabber
	 * register must be set to a multiple of 8 bytes, so round up. JABBER is
	 * an unconditional limit, so we need to account for two possible VLAN
	 * tags.
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);

	return 0;
}

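/* Handler for the single MIX interrupt.  The pending causes are read from
 * and written back to MIX_ISR to acknowledge them; RX work is deferred to
 * NAPI and TX completion to the clean-up tasklet, each with its interrupt
 * source masked until the deferred work re-enables it.
 */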
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

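/* SIOCSHWTSTAMP handler.  Hardware timestamping is only available on CN6XXX;
 * there the PTP clock is enabled (using the IO clock as reference unless an
 * external clock input is already configured) and the AGL RX frame control
 * is switched into PTP mode so each received frame is prefixed with an
 * 8-byte timestamp.
 */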
static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;
	/* Check whether the hardware supports timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			netdev_info(netdev,
				    "PTP Clock using sclk reference @ %lldHz\n",
				    (NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			netdev_info(netdev,
				    "PTP Clock using GPIO%d @ %lld Hz\n",
				    ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
	default:
		if (netdev->phydev)
			return phy_mii_ioctl(netdev->phydev, rq, cmd);
		return -EINVAL;
	}
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int i;
		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
				break;
			mdelay(1);
			i++;
		}
	}
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state only if link is set */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

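/* Reprogram the GMX port configuration to match the PHY's negotiated speed
 * and duplex: speed/speed_msb, slot time and burst are set per speed, and on
 * CN6XXX the TX clock divider is adjusted for RGMII at 10/100 Mbit.  The
 * caller is expected to have disabled the port first.
 */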
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	struct net_device *ndev = p->netdev;
	struct phy_device *phydev = ndev->phydev;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (!phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = phydev->duplex;

	switch (phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = phydev->duplex;
		}
		break;
	case 0: /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}

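/* phylib adjust_link callback.  When the negotiated link parameters change,
 * disable the port, reprogram it via octeon_mgmt_update_link() and re-enable
 * it, then log the new link state.
 */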
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	unsigned long flags;
	int link_changed = 0;

	if (!phydev)
		return;

	spin_lock_irqsave(&p->lock, flags);

	if (!phydev->link && p->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (p->last_duplex != phydev->duplex ||
	     p->last_link != phydev->link ||
	     p->last_speed != phydev->speed)) {
		octeon_mgmt_disable_link(p);
		link_changed = 1;
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	}

	p->last_link = phydev->link;
	p->last_speed = phydev->speed;
	p->last_duplex = phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0)
			netdev_info(netdev, "Link is up - %d/%s\n",
				    phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
		else
			netdev_info(netdev, "Link is down\n");
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	phydev = of_phy_connect(netdev, p->phy_np,
				octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (!phydev)
		return -ENODEV;

	return 0;
}

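/* ndo_open: allocate and DMA-map the TX/RX descriptor rings, reset and
 * configure the MIX/AGL blocks (ring bases, MAC address, MTU, RGMII or MII
 * mode on CN6XXX including DLL and compensation bring-up), pre-fill the RX
 * ring, clear the hardware statistics, request the interrupt, program the
 * RX/TX interrupt watermarks and frame control, and finally bring up the
 * link and NAPI.
 */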
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (netdev->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clock counts are based on the 125 MHz reference, which
		 * has an 8 ns period, so our delays need to be multiplied by
		 * this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);
		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);
		/* Default Interframe Gaps are too small.  Recommended
		 * workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean. */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */
	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (netdev->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator. The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (netdev->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(netdev->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (netdev->phydev)
		phy_disconnect(netdev->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything. */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

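/* ndo_start_xmit: map the skb, append a descriptor to the TX ring under the
 * tx_list lock and ring the MIX_ORING2 doorbell.  When the ring is almost
 * full the queue is stopped first; if it is completely full the mapping is
 * undone and NETDEV_TX_BUSY is returned so the stack retries later.
 */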
static netdev_tx_t
octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	netdev_tx_t rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring. */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell. */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netif_trans_update(netdev);
	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static int octeon_mgmt_nway_reset(struct net_device *dev)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (dev->phydev)
		return phy_start_aneg(dev->phydev);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.nway_reset = octeon_mgmt_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

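/* Platform probe.  The device tree supplies the cell-index (port number),
 * the interrupt, three memory regions (MIX, AGL and the per-port AGL_PRT_CTL
 * registers), an optional MAC address and an optional phy-handle.  The
 * regions are requested and mapped, the netdev is given a 64-bit DMA mask
 * and registered.
 */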
static int octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	struct resource *res_agl_prt_ctl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	platform_set_drvdata(pdev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;
	p->has_rx_tstamp = false;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (res_agl_prt_ctl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);
	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);

	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);
		goto err;
	}

	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
					   p->agl_prt_ctl_size);
	if (!p->mix || !p->agl || !p->agl_prt_ctl) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		result = -ENOMEM;
		goto err;
	}

	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
	netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(netdev);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (result)
		goto err;

	netif_carrier_off(netdev);
	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	of_node_put(p->phy_np);
	free_netdev(netdev);
	return result;
}

static int octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct octeon_mgmt *p = netdev_priv(netdev);

	unregister_netdev(netdev);
	of_node_put(p->phy_np);
	free_netdev(netdev);
	return 0;
}

static const struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name = "octeon_mgmt",
		.of_match_table = octeon_mgmt_match,
	},
	.probe = octeon_mgmt_probe,
	.remove = octeon_mgmt_remove,
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);