/* ether.c - Ethernet-over-USB (gadget) link layer, ported to FreeRTOS */

#include "usb_os_adapter.h"
#include "board.h"
#include <linux/usb/cdc.h>
#include "linux/usb/composite.h"
#include "linux/usb/ether.h"
#include <stdio.h>
#include "FreeRTOS.h"
#include "trace.h"
#include "list.h"
#include "task.h"
#include "ark_dwc2.h"

#if !USE_LWIP
#include "FreeRTOS_IP.h"
#include "FreeRTOS_IP_Private.h"
#include "NetworkBufferManagement.h"
#include "NetworkInterface.h"
#else
#include "queue.h"
#include "lwip/pbuf.h"
#include "ethernet.h"

struct netif *ncm_netif = NULL;

void ncm_net_set_intf(void *intf)
{
	ncm_netif = (struct netif *)intf;
}

extern void ncm_ethernetif_input(void *h, struct pbuf *p);
#endif

#define UETH__VERSION	"29-May-2008"

#define WORK_RX_MEMORY	0
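
/*
 * Per-link state for one Ethernet-over-USB port: the gadget and the active
 * gether port, a freelist of TX usb_requests, a single reusable RX (OUT)
 * request, the host-visible MAC address, and the worker task plus its
 * event queue.
 */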
struct eth_dev {
	spinlock_t lock;
	struct gether *port_usb;
	struct usb_gadget *gadget;

	spinlock_t req_lock;
	List_t tx_reqs;
	atomic_t tx_qlen;
#if !USE_LWIP
	List_t rx_frames;
#else
	struct pbuf rx_frames;
#endif
	struct usb_request *out_req;

	unsigned header_len;
	bool zlp;
	u8 host_mac[ETH_ALEN];

	TaskHandle_t usb_ether_task;
	QueueHandle_t usb_ether_event_queue;
	int start;
	int tx_err_count;
};

struct eth_event
{
	int type;
	void *priv;
};

#define USB_ETH_EVENT_USB_CONNECT	0
#define USB_ETH_EVENT_USB_DISCONNECT	1
#define USB_ETH_EVENT_USB_DATA_RX	2	// usb recv data
#define USB_ETH_EVENT_USB_DATA_TX	3	// usb data tx ok notify
#define USB_ETH_EVENT_NET_DATA_TX	4	// data from tcpip
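
/*
 * All work is funneled through a single worker task: the endpoint
 * completion callbacks and the public gether_* entry points only post one
 * of the events above to usb_ether_event_queue, and usb_ether_task_proc()
 * dispatches them in task context.
 */
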
/*-------------------------------------------------------------------------*/

#undef atomic_read
#define atomic_read(v)		((v)->counter)
#undef atomic_set
#define atomic_set(v, i)	((v)->counter = (i))
#undef atomic_inc
#define atomic_inc(v)		((v)->counter++)

#define RX_EXTRA	20	/* bytes guarding against rx overflows */
#define DEFAULT_QLEN	4	/* double buffering by default */

static unsigned qmult = 2;
static struct eth_dev *the_dev;

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
			gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN * 8;
}

/*-------------------------------------------------------------------------*/

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt, (d)->net->name, ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev, KERN_DEBUG, fmt, ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev, KERN_ERR, fmt, ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev, KERN_INFO, fmt, ## args)

static void ether_disconnect(struct gether *link);
static int ether_connect(struct eth_dev *dev, struct gether *link);
static int rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags);
static void rx_complete(struct usb_ep *ep, struct usb_request *req);

int ncm_get_mac_address(char mac[6])
{
	if (the_dev)
		memcpy(mac, the_dev->host_mac, 6);
	return 0;
}
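
/*
 * Pre-allocate up to @n usb_request structures on @list for endpoint @ep.
 * Requests are allocated without buffers (buf/length are filled in at
 * transmit time) and each list item is linked back to its request through
 * the FreeRTOS list-item owner pointer.  If the list already holds more
 * than @n entries, the surplus requests are freed.
 */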
static int prealloc(List_t *list, struct usb_ep *ep, unsigned n)
{
	unsigned i;
	struct usb_request *req;
	ListItem_t *pxListItem = NULL;

	if (!n)
		return -ENOMEM;

	i = n;
	list_for_each_entry(pxListItem, req, list) {
		if (i-- == 0) {
			printf("prealloc out!\r\n");
			goto extra;
		}
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		vListInitialiseItem(&req->list);
		listSET_LIST_ITEM_OWNER(&req->list, req);
		printf("req:%p is alloc\n", (void *)req);
		req->buf = NULL;
		req->length = 0;
		list_add_tail(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		ListItem_t *next;

		next = listGET_NEXT(&req->list);
		uxListRemove(&req->list);
		usb_ep_free_request(ep, req);
		if (next == listGET_END_MARKER(list))
			break;
		/* advance to the request owning the next list item */
		req = listGET_LIST_ITEM_OWNER(next);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	/*status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;*/
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	//struct usb_request *req;
	//unsigned long flags;

	/* fill unused rxq slots with some skb */
	/*spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = listGET_LIST_ITEM_OWNER(listGET_HEAD_ENTRY(&dev->rx_reqs));
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);
		rx_submit(dev, req, 0);
		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);*/

	/* a single OUT request is used for RX; just (re)submit it */
	rx_submit(dev, dev->out_req, 0);
}
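
/*
 * Endpoint completion callbacks.  They may run in interrupt context, so
 * they only post an event to the worker queue; the actual frame handling
 * and request recycling happens in usb_ether_task_proc().
 */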
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct eth_dev *dev = ep->driver_data;
	struct eth_event ev;

	ev.type = USB_ETH_EVENT_USB_DATA_TX;
	ev.priv = (void *)req;
	xQueueSendFromISR(dev->usb_ether_event_queue, &ev, 0);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct eth_event ev;
	struct eth_dev *dev = ep->driver_data;

	//printf("%s:%d req->actual:%d\r\n", __func__, __LINE__, req->actual);
	ev.type = USB_ETH_EVENT_USB_DATA_RX;
	ev.priv = (void *)req;
	if (!xPortIsInInterrupt())
		xQueueSend(dev->usb_ether_event_queue, &ev, 0);
	else
		xQueueSendFromISR(dev->usb_ether_event_queue, &ev, 0);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}
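
/*
 * (Re)queue the single OUT request.  The buffer must hold a full Ethernet
 * frame plus the function-specific header and RX_EXTRA guard bytes,
 * rounded up to a multiple of the endpoint's maxpacket size; for
 * fixed-size links (NCM) it is at least fixed_out_len.  The buffer is
 * allocated lazily and grown only when the required size increases.
 */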
static int rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	int retval = -ENOMEM;
	size_t size = 0;
	struct usb_ep *out;
	unsigned long flags;
	void *buf = NULL;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	size += ETH_ALEN + ipconfigNETWORK_MTU + RX_EXTRA;
	size += dev->port_usb->header_len;
	if (out->maxpacket <= 0) {
		printf("maxpacket err\r\n");
	}
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;
	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	buf = req->buf;
	if (NULL == req->buf || req->length < size) {
		if (NULL != req->buf)
			vPortFree(req->buf);
		buf = pvPortMalloc(size);
		if (buf == NULL) {
			/* the old buffer has already been freed */
			req->buf = NULL;
			req->length = 0;
			return -ENOMEM;
		}
	}
	//memset(req->buf, 0, req->length);
	req->buf = buf;
	req->length = size;
	req->complete = rx_complete;
	req->context = NULL;

	retval = usb_ep_queue(out, req, 0);
	if (retval) {
		printf("rx submit failed--> %d\r\n", retval);
		/*spin_lock_irqsave(&dev->req_lock, flags);
		list_add_tail(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);*/
	}
	return retval;
}
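
/*
 * Worker-side RX handler.  Unwraps the completed OUT transfer (e.g. an NCM
 * NTB) into individual Ethernet frames on dev->rx_frames, hands each frame
 * to the network stack (FreeRTOS+TCP event or lwIP input), then resubmits
 * the OUT request for the next transfer.
 */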
int usb_data_rx_proc(struct eth_dev *dev, struct usb_request *req)
{
	int ret = -1;

	if (!dev->port_usb->connected) {
		printf("%s:%d usb is disconnected\r\n", __func__, __LINE__);
		return -1;
	}
	if (req->status != 0) {
		usb_ep_queue(dev->port_usb->out_ep, req, 0);
		return -1;
	}
	if (dev->port_usb && dev->port_usb->unwrap) {
#if !USE_LWIP
		dev->port_usb->unwrap(dev->port_usb, req->buf, req->actual, &dev->rx_frames);
#else
		dev->port_usb->unwrap(dev->port_usb, req->buf, req->actual, (List_t *)&dev->rx_frames);
#endif
	}

	if (dev->port_usb->connected) {
#if !USE_LWIP
		NetworkBufferDescriptor_t *pxBufferDescriptor = NULL;
		IPStackEvent_t xRxEvent;
		ListItem_t *pxListItem, *pxListItem1;

		list_for_each_entry_safe(pxListItem, pxListItem1, pxBufferDescriptor, &dev->rx_frames) {
			list_del_init(&pxBufferDescriptor->xBufferListItem);
			xRxEvent.eEventType = eNetworkRxEvent;
			xRxEvent.pvData = (void *)pxBufferDescriptor;
			if (0) {
				int i, dump_len = pxBufferDescriptor->xDataLength;

				printf("recv len:%d\r\n", dump_len);
				for (i = 0; i < dump_len; i++) {
					printf("%02x ", pxBufferDescriptor->pucEthernetBuffer[i]);
					if ((i + 1) % 16 == 0)
						printf("\r\n");
				}
				printf("\r\n");
			}
			if (xSendEventStructToIPTask(&xRxEvent, 0) == pdFALSE) {
				vReleaseNetworkBufferAndDescriptor(pxBufferDescriptor);
				iptraceETHERNET_RX_EVENT_LOST();
			} else {
				iptraceNETWORK_INTERFACE_RECEIVE();
			}
		}
#else
		if (dev->rx_frames.next) {
			struct pbuf *header = dev->rx_frames.next;

			while (header != NULL) {
				struct pbuf *current = header;

				header = header->next;
				current->next = NULL;
				ncm_ethernetif_input(ncm_netif, current);
			}
			dev->rx_frames.next = NULL;
		}
#endif
	}

	if (req) {
		ret = rx_submit(dev, req, 0);
		if (ret != 0) {
			rx_fill(dev, 0);
		}
	}
	return 0;
}
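
/*
 * Worker-side TX-complete handler.  Frees the network buffer that was
 * attached to the finished IN request and returns the request to the
 * tx_reqs freelist (or frees it if the link has already gone down).
 */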
void usb_data_tx_proc(struct eth_dev *dev, struct usb_request *req)
{
#if !USE_LWIP
	NetworkBufferDescriptor_t *pxBufferDescriptor = (NetworkBufferDescriptor_t *)req->context;

	if (pxBufferDescriptor) {
		if (pxBufferDescriptor->pucEthernetBuffer) {
			vPortFree(pxBufferDescriptor->pucEthernetBuffer);
			pxBufferDescriptor->pucEthernetBuffer = NULL;
		}
		vReleaseNetworkBufferAndDescriptor(pxBufferDescriptor);
	}
#else
	/* the pbuf was allocated by the NCM layer and must be freed here */
	struct pbuf *pxBufferDescriptor = (struct pbuf *)req->context;

	if (pxBufferDescriptor) {
		pbuf_free(pxBufferDescriptor);
	}
#endif

	spin_lock(&dev->req_lock);
	if ((dev->port_usb == NULL || !dev->port_usb->connected)
			&& dev->gadget && dev->gadget->ep0) {
		req->buf = NULL;
		req->length = 0;
		usb_ep_free_request(dev->gadget->ep0, req);
		printf("%s #############.\n", __func__);
	} else {
		list_add_tail(&req->list, &dev->tx_reqs);
	}
	spin_unlock(&dev->req_lock);
}

static void free_net_buffer(void *desc_handle)
{
#if !USE_LWIP
	NetworkBufferDescriptor_t *pxBufferDescriptor = (NetworkBufferDescriptor_t *)desc_handle;

	if (NULL != pxBufferDescriptor)
		vReleaseNetworkBufferAndDescriptor(pxBufferDescriptor);
#else
	struct pbuf *pxBufferDescriptor = (struct pbuf *)desc_handle;

	if (pxBufferDescriptor) {
		pbuf_free(pxBufferDescriptor);
	}
#endif
}
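
/*
 * Worker-side TX handler for frames coming from the network stack.
 * Drops frames that fail the host's CDC packet filter, takes a free
 * usb_request from tx_reqs, lets the function driver wrap the frame
 * (e.g. add the NCM header), and queues it on the IN endpoint.  If the
 * freelist stays empty for several frames in a row, the controller is
 * reset via usb_dwc2_reset() as a recovery measure.
 */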
int net_data_tx_proc(struct eth_dev *dev, void *pxBufferDescriptorHandle)
{
	int length = 0;
	int retval;
	struct usb_request *req = NULL;
	unsigned long flags;
	struct usb_ep *in;
	u16 cdc_filter;
#if !USE_LWIP
	NetworkBufferDescriptor_t *pxBufferDescriptor = (NetworkBufferDescriptor_t *)pxBufferDescriptorHandle;
#else
	struct pbuf *pxBufferDescriptor = (struct pbuf *)pxBufferDescriptorHandle;
#endif

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in || !dev->port_usb->connected) {
		free_net_buffer((void *)pxBufferDescriptor);
		return 0;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
#if !USE_LWIP
		u8 *dest = pxBufferDescriptor->pucEthernetBuffer;
#else
		u8 *dest = pxBufferDescriptor->payload;
#endif

		if (is_multicast_ether_addr(dest)) {
			u16 type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				free_net_buffer((void *)pxBufferDescriptor);
				return 0;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		printf("tx reqs empty\r\n");
		free_net_buffer((void *)pxBufferDescriptor);
		if (dev->tx_err_count++ > 3) {
			usb_dwc2_reset(0, 1);
			dev->tx_err_count = 0;
		}
		return -1;
	}
	dev->tx_err_count = 0;

	req = listGET_LIST_ITEM_OWNER(listGET_HEAD_ENTRY(&dev->tx_reqs));
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs)) {
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
#if !USE_LWIP
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb && dev->port_usb->wrap)
		pxBufferDescriptor = dev->port_usb->wrap(dev->port_usb, pxBufferDescriptor);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!pxBufferDescriptor)
		goto drop;

	length = pxBufferDescriptor->xDataLength;
	req->buf = pxBufferDescriptor->pucEthernetBuffer;
	req->context = pxBufferDescriptor;
	req->complete = tx_complete;
#else
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb && dev->port_usb->wrap_ext)
		pxBufferDescriptor = (struct pbuf *)dev->port_usb->wrap_ext(dev->port_usb, (void *)pxBufferDescriptor);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!pxBufferDescriptor)
		goto drop;

	length = pxBufferDescriptor->len;
	req->buf = pxBufferDescriptor->payload;
	req->context = pxBufferDescriptor;
	req->complete = tx_complete;
#endif

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;
	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
#if !USE_LWIP
		pxBufferDescriptor = (NetworkBufferDescriptor_t *)req->context;
		if (pxBufferDescriptor) {
			if (pxBufferDescriptor->pucEthernetBuffer) {
				vPortFree(pxBufferDescriptor->pucEthernetBuffer);
				pxBufferDescriptor->pucEthernetBuffer = NULL;
			}
			vReleaseNetworkBufferAndDescriptor(pxBufferDescriptor);
		}
#else
		pxBufferDescriptor = (struct pbuf *)req->context;
		if (pxBufferDescriptor) {
			pbuf_free(pxBufferDescriptor);
		}
#endif
drop:
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add_tail(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return 0;
}
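
/*
 * Single worker task.  All USB-ether work (connect/disconnect, RX frames,
 * TX-complete recycling, frames from the network stack) is serialized here
 * by draining usb_ether_event_queue.
 */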
static void usb_ether_task_proc(void *arg)
{
	struct eth_event ev;
	struct eth_dev *dev = (struct eth_dev *)arg;

	while (dev->start) {
		ev.type = -1;
		if (xQueueReceive(dev->usb_ether_event_queue, &ev, portMAX_DELAY) != pdPASS) {
			printf("%s xQueueReceive err!\r\n", __func__);
			continue;
		}
		if (ev.type == -1)
			continue;

		switch (ev.type) {
		case USB_ETH_EVENT_USB_DATA_RX:
			if (NULL != ev.priv)
				usb_data_rx_proc(dev, (struct usb_request *)ev.priv);
			break;
		case USB_ETH_EVENT_USB_DATA_TX:
			if (NULL != ev.priv)
				usb_data_tx_proc(dev, (struct usb_request *)ev.priv);
			break;
		case USB_ETH_EVENT_NET_DATA_TX:
			if (NULL != ev.priv)
				net_data_tx_proc(dev, ev.priv);
			break;
		case USB_ETH_EVENT_USB_DISCONNECT:
			if (NULL != ev.priv)
				ether_disconnect((struct gether *)ev.priv);
			break;
		case USB_ETH_EVENT_USB_CONNECT:
			if (NULL != ev.priv)
				ether_connect(dev, (struct gether *)ev.priv);
			break;
		default:
			break;
		}
	}
	vTaskDelete(NULL);
}

/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The host-side link-layer address
 * is currently hard-coded below (a random-address generator is kept
 * under #if 0).
 *
 * Returns negative errno, or zero on success
 */
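
/*
 * Typical call sequence from a USB function driver (illustrative sketch
 * only; caller-side names such as ncm->port are assumptions, not defined
 * in this file):
 *
 *	u8 host_mac[ETH_ALEN];
 *
 *	gether_setup(gadget, host_mac);		// once, at bind time
 *	...
 *	gether_connect(&ncm->port);		// host selected the data alt setting
 *	gether_send_ext(p);			// hand an outgoing frame (pbuf) to the link
 *	gether_disconnect(&ncm->port);		// on disable / reset
 *	gether_cleanup();			// at unbind
 *
 * gether_connect()/gether_disconnect() only post events; the real endpoint
 * setup and teardown run in the worker task.
 */
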
#if 0
UBaseType_t uxRand(void);

static void random_ether_addr(u8 *addr)
{
	UBaseType_t val = uxRand();

	if (NULL == addr)
		return;
	addr[0] = ((val >> 0) & 0xff);
	addr[1] = ((val >> 8) & 0xff);
	addr[2] = ((val >> 16) & 0xff);
	addr[3] = ((val >> 24) & 0xff);
	val = uxRand();
	addr[4] = ((val >> 0) & 0xff);
	addr[5] = ((val >> 8) & 0xff);
	addr[0] &= 0xfe;	/* clear multicast bit */
	addr[0] |= 0x02;	/* set local assignment bit (IEEE802) */
}
#endif

int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	struct eth_dev *dev;
	BaseType_t ret = pdFAIL;

	if (the_dev)
		return -EBUSY;

	dev = pvPortMalloc(sizeof(struct eth_dev));
	if (dev == NULL)
		return -ENOMEM;
	memset((void *)dev, 0, sizeof(struct eth_dev));

	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_LIST_HEAD(&dev->tx_reqs);
	//INIT_LIST_HEAD(&dev->rx_reqs);
#if !USE_LWIP
	INIT_LIST_HEAD(&dev->rx_frames);
#else
	dev->rx_frames.next = NULL;
	dev->rx_frames.len = 0;
	dev->rx_frames.tot_len = 0;
#endif

#if 1
	dev->host_mac[0] = 0x00;	//00:0c:29:53:02:09
	dev->host_mac[1] = 0x0c;
	dev->host_mac[2] = 0x29;
	dev->host_mac[3] = 0x53;
	dev->host_mac[4] = 0x02;
	dev->host_mac[5] = 0x09;
#else
	random_ether_addr(dev->host_mac);
#endif
	//FreeRTOS_UpdateMACAddress(dev->host_mac);
	printf("\r\nncm mac: %02x %02x %02x %02x %02x %02x\r\n", dev->host_mac[0], dev->host_mac[1],
	       dev->host_mac[2], dev->host_mac[3], dev->host_mac[4], dev->host_mac[5]);
	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	dev->usb_ether_event_queue = xQueueCreate(16, sizeof(struct eth_event));
	if (NULL == dev->usb_ether_event_queue) {
		goto exit;
	}
	dev->start = 1;
	ret = xTaskCreate(usb_ether_task_proc, "usb_ether_task", 2048, (void *)dev,
			  configMAX_PRIORITIES - 3, &dev->usb_ether_task);
	if (ret != pdPASS) {
		goto exit;
	}

	dev->gadget = g;
	the_dev = dev;
	return 0;

exit:
	if (ret != pdPASS) {
		if (dev) {
			if (dev->usb_ether_event_queue) {
				vQueueDelete(dev->usb_ether_event_queue);
			}
			vPortFree(dev);
		}
	}
	return -1;
}

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(void)
{
	struct eth_event ev;
	struct eth_dev *dev = the_dev;

	if (!dev)
		return;

	dev->start = 0;
	if (dev->usb_ether_event_queue) {
		ev.type = -1;
		xQueueSend(dev->usb_ether_event_queue, &ev, 0);
		vQueueDelete(dev->usb_ether_event_queue);
	} /* FIXME: possible race: the worker task may still be using this queue */
	vPortFree(dev);
	the_dev = NULL;
}
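
/*
 * Worker-side connect handler: enables the IN/OUT endpoints, pre-allocates
 * the TX request pool, allocates the single RX (OUT) request, and primes
 * the first OUT transfer before marking the link connected.
 */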
static int ether_connect(struct eth_dev *dev, struct gether *link)
{
	int result = 0;

	if (!dev)
		return -EINVAL;

	dev->tx_err_count = 0;
	if (link && link->connected)
		return 0;

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep, link->in);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep, link->out);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0) {
		result = alloc_requests(dev, link, qlen(dev->gadget));
		dev->out_req = NULL;
		dev->out_req = usb_ep_alloc_request(link->out_ep, GFP_ATOMIC);
		if (!dev->out_req) {
			printf("usb_ep_alloc_request %s --> %d\r\n",
				link->out_ep->name, result);
			goto fail1;
		}
		dev->out_req->buf = NULL;
		dev->out_req->length = 0;
	}

	if (result == 0 && dev->out_req) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));
		dev->header_len = link->header_len;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ctx = (void *)dev;
		spin_unlock(&dev->lock);

		rx_fill(dev, 0);
		atomic_set(&dev->tx_qlen, 0);
		link->connected = true;
	/* on error, disable any endpoints */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	return result;
}

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
static void ether_disconnect(struct gether *link)
{
	struct eth_dev *dev = (struct eth_dev *)link->ctx;
	struct usb_request *req;

	if (!dev || !link->connected)
		return;

	dev->tx_err_count = 0;
	link->connected = false;
	printf("%s:%d start\r\n", __func__, __LINE__);

	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = listGET_LIST_ITEM_OWNER(listGET_HEAD_ENTRY(&dev->tx_reqs));
		list_del(&req->list);
		spin_unlock(&dev->req_lock);

		req->buf = NULL;
		req->length = 0;
		usb_ep_free_request(link->in_ep, req);
		printf("req:%p is released at disconnect\n", (void *)req);

		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
#if !USE_LWIP
	while (!list_empty(&dev->rx_frames)) {
		NetworkBufferDescriptor_t *pxNetworkBuffer =
			listGET_LIST_ITEM_OWNER(listGET_HEAD_ENTRY(&dev->rx_frames));

		list_del_init(&pxNetworkBuffer->xBufferListItem);
		vReleaseNetworkBufferAndDescriptor(pxNetworkBuffer);
	}
#else
	if (dev->rx_frames.next) {
		struct pbuf *header = dev->rx_frames.next;

		while (header != NULL) {
			struct pbuf *current = header;

			header = header->next;
			pbuf_free(current);
		}
		dev->rx_frames.next = NULL;
	}
#endif
	req = dev->out_req;
	if (req && req->buf) {
		vPortFree(req->buf);
		req->buf = NULL;
		req->length = 0;
	}
	usb_ep_free_request(link->out_ep, dev->out_req);
	dev->out_req = NULL;
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);

	if (link && link->disconnect_cb)
		link->disconnect_cb(link);
	printf("%s:%d end\r\n", __func__, __LINE__);
}
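
/*
 * Public entry points.  These are thin wrappers that post an event to the
 * worker task (using the FromISR variant when called from interrupt
 * context), so they are safe to call from the UDC interrupt path.
 */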
int gether_connect(struct gether *link)
{
	struct eth_event ev;
	struct eth_dev *dev = the_dev;

	if (dev && dev->usb_ether_event_queue) {
		ev.type = USB_ETH_EVENT_USB_CONNECT;
		ev.priv = (void *)link;
		if (!xPortIsInInterrupt())
			xQueueSend(dev->usb_ether_event_queue, &ev, 0);
		else
			xQueueSendFromISR(dev->usb_ether_event_queue, &ev, 0);
	}
	return 0;
}

void gether_disconnect(struct gether *link)
{
	struct eth_event ev;
	struct eth_dev *dev = (struct eth_dev *)link->ctx;

	if (dev && dev->usb_ether_event_queue) {
		ev.type = USB_ETH_EVENT_USB_DISCONNECT;
		ev.priv = (void *)link;
		if (!xPortIsInInterrupt())
			xQueueSend(dev->usb_ether_event_queue, &ev, 0);
		else
			xQueueSendFromISR(dev->usb_ether_event_queue, &ev, 0);
	}
}

void gether_send(NetworkBufferDescriptor_t * const pxDescriptor)
{
	struct eth_event ev;
	struct eth_dev *dev = the_dev;

	if (dev && dev->usb_ether_event_queue) {
		ev.type = USB_ETH_EVENT_NET_DATA_TX;
		ev.priv = (void *)pxDescriptor;
		xQueueSend(dev->usb_ether_event_queue, &ev, 0);
	} else {
#if !USE_LWIP
		vReleaseNetworkBufferAndDescriptor(pxDescriptor);
#endif
	}
}

void gether_send_ext(void * const pxDescriptor)
{
	struct eth_event ev;
	struct eth_dev *dev = the_dev;

	if (dev && dev->usb_ether_event_queue) {
		ev.type = USB_ETH_EVENT_NET_DATA_TX;
		ev.priv = (void *)pxDescriptor;
		xQueueSend(dev->usb_ether_event_queue, &ev, 0);
	} else {
#if !USE_LWIP
		vReleaseNetworkBufferAndDescriptor(pxDescriptor);
#endif
	}
}