// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"
/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
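/*
 * Illustrative sketch (not part of the original file): the lifecycle a
 * function driver typically drives against this layer.  The caller name
 * and error handling are assumptions for illustration; the "link"
 * endpoints and any wrap/unwrap framing are set up by the function
 * driver itself before gether_connect() is called.
 */
#if 0
static struct eth_dev *example_bind(struct usb_gadget *g, struct gether *link)
{
	u8 host_mac[ETH_ALEN];
	struct eth_dev *dev;

	/* allocate and register the single "usbX" network link */
	dev = gether_setup_name(g, NULL, NULL, host_mac, QMULT_DEFAULT, "usb");
	if (IS_ERR(dev))
		return dev;

	link->ioport = dev;
	/* later, when the host selects the data interface:
	 *	net = gether_connect(link);
	 * on disconnect or altsetting 0:
	 *	gether_disconnect(link);
	 * and at unbind time:
	 *	gether_cleanup(dev);
	 */
	return dev;
}
#endif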
#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes.  Set the max MTU size to 15k+52 (15 * 1024 + 52 = 15412)
 * to prevent allocating 32k blocks and still have efficient handling.
 */
#define GETHER_MAX_MTU_SIZE	15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	bool			no_skb_reserve;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed >= USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
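/* Worked example (added note): with the usual qmult default of 5, a
 * high- or super-speed link keeps 5 * DEFAULT_QLEN = 10 requests per
 * direction in flight, while a full-speed link stays double buffered.
 */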
/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
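/* Added note: these strings are what userspace sees from
 * "ethtool -i usb0" (driver, version, firmware-version, bus-info).
 */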
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
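/* Worked example (added note): with mtu 1500 and no wrapper header,
 * size = 14 (ethhdr) + 1500 + 20 (RX_EXTRA) = 1534 bytes; on a UDC with
 * quirk_ep_out_aligned_size and a 512-byte bulk maxpacket that rounds
 * up to 1536, i.e. exactly three max-size packets.
 */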
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}
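/* Added note: calling prealloc() again reuses whatever is already on
 * the freelist -- with a smaller n it frees the surplus requests, and
 * with a larger n it allocates only the difference (hence the
 * "queue/recycle" comment above).
 */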
static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (skb && !in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}
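	/* Added note: for example, a broadcast ARP is silently dropped
	 * here unless the host enabled USB_CDC_PACKET_TYPE_BROADCAST via
	 * the CDC SetEthernetPacketFilter request.
	 */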
	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* Use zlp framing on tx for strict CDC-Ether conformance, though
	 * any robust network rx path ignores extra padding.  Some hardware
	 * can't write zlps, so instead send one extra byte: the host's
	 * short read still terminates the transfer.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
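/* Worked example (added note): a 1024-byte frame on a 512-byte bulk
 * endpoint is an exact multiple of maxpacket, so with dev->zlp the UDC
 * appends a zero-length packet; without it, length becomes 1025 and the
 * final 1-byte packet serves as the short-packet terminator.
 */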
/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (!netif_carrier_ok(dev->net))
		netif_carrier_on(dev->net);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
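/* Added note: get_ether_addr() accepts MACs like "01:23:45:67:89:ab",
 * with '.' separators, or with no separators at all; anything that does
 * not parse to a valid unicast address falls back to a random one, and
 * the nonzero return lets callers warn about that.
 *
 * The helper below needs room for the canonical 17-character
 * "xx:xx:xx:xx:xx:xx" form of %pM plus a trailing NUL, hence 18.
 */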
static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};
/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to be associated with this link
 * @dev_addr: NULL, or a string with the device-side link address
 * @host_addr: NULL, or a string with the host-side link address
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier used at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);
struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	struct sockaddr sa;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;
	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}
	sa.sa_family = net->type;
	memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
	rtnl_lock();
	status = dev_set_mac_address(net, &sa);
	rtnl_unlock();
	if (status)
		pr_warn("cannot set self ethernet address: %d\n", status);
	else
		INFO(dev, "MAC %pM\n", dev->dev_mac);

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);
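/*
 * Illustrative sketch (not part of the original file): the two-phase
 * setup used by configfs-style functions, where the netdev is allocated
 * before any gadget exists and is bound to one later.  The caller name
 * is hypothetical and error handling is abbreviated.
 */
#if 0
static int example_configfs_bind(struct usb_gadget *g)
{
	struct net_device *net;

	net = gether_setup_name_default("usb");
	if (IS_ERR(net))
		return PTR_ERR(net);

	gether_set_gadget(net, g);		/* sets net's parent device */
	return gether_register_netdev(net);	/* registers and sets the MAC */
}
#endif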
void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	int ret;

	rtnl_lock();
	ret = snprintf(name, len, "%s\n", netdev_name(net));
	rtnl_unlock();
	return ret < len ? ret : len;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);
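/* Added note (assumption about callers): these accessors back the
 * per-function attributes that gadget functions expose to userspace
 * (for the configfs gadget, files such as dev_addr, host_addr, qmult
 * and ifname under the function's directory); the getters append a
 * newline so the strings read back sysfs-style.  gether_get_host_addr_cdc()
 * instead returns the bare 12-hex-digit "%pm" form used for the CDC
 * Ethernet iMACAddress string descriptor.
 */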
/**
 * gether_cleanup - remove Ethernet-over-USB device
 * @dev: the device being removed
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup_name()
 * and friends.
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);
/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);
/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");