// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#include "usbip_common.h"
#include "stub.h"
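
/*
 * Helpers that inspect a control URB's setup packet to detect requests
 * which need special handling on the stub side before submission.
 */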
static int is_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
	       (req->bRequestType == USB_RECIP_ENDPOINT) &&
	       (req->wValue == USB_ENDPOINT_HALT);
}

static int is_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_INTERFACE) &&
	       (req->bRequestType == USB_RECIP_INTERFACE);
}

static int is_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
	       (req->bRequestType == USB_RECIP_DEVICE);
}

static int is_reset_device_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 value;
	__u16 index;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	value = le16_to_cpu(req->wValue);
	index = le16_to_cpu(req->wIndex);

	if ((req->bRequest == USB_REQ_SET_FEATURE) &&
	    (req->bRequestType == USB_RT_PORT) &&
	    (value == USB_PORT_FEAT_RESET)) {
		usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
		return 1;
	} else
		return 0;
}
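
/*
 * The tweak_*_cmd() helpers below perform the request through the
 * proper USB core API on the server side instead of forwarding the raw
 * control transfer, so that the server's USB core also observes these
 * state changes (halt, altsetting, configuration, reset).
 */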
static int tweak_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	int target_endp;
	int target_dir;
	int target_pipe;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	/*
	 * The stalled endpoint is specified in the wIndex value. The endpoint
	 * of the urb is the target of this clear_halt request (i.e., control
	 * endpoint).
	 */
	target_endp = le16_to_cpu(req->wIndex) & 0x000f;

	/* is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
	target_dir = le16_to_cpu(req->wIndex) & 0x0080;

	if (target_dir)
		target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
	else
		target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

	ret = usb_clear_halt(urb->dev, target_pipe);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_clear_halt error: devnum %d endp %d ret %d\n",
			urb->dev->devnum, target_endp, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_clear_halt done: devnum %d endp %d\n",
			 urb->dev->devnum, target_endp);

	return ret;
}

static int tweak_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 alternate;
	__u16 interface;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	alternate = le16_to_cpu(req->wValue);
	interface = le16_to_cpu(req->wIndex);

	usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
			  interface, alternate);

	ret = usb_set_interface(urb->dev, interface, alternate);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_set_interface error: inf %u alt %u ret %d\n",
			interface, alternate, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_set_interface done: inf %u alt %u\n",
			 interface, alternate);

	return ret;
}

static int tweak_set_configuration_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	struct usb_ctrlrequest *req;
	__u16 config;
	int err;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	config = le16_to_cpu(req->wValue);

	usb_lock_device(sdev->udev);
	err = usb_set_configuration(sdev->udev, config);
	usb_unlock_device(sdev->udev);
	if (err && err != -ENODEV)
		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
			config, err);
	return err;
}

static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	int err;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	err = usb_lock_device_for_reset(sdev->udev, NULL);
	if (err < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return err;
	}
	err = usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return err;
}

/*
 * clear_halt, set_interface, and set_configuration require special tricks.
 * Returns 1 if request was tweaked, 0 otherwise.
 */
static int tweak_special_requests(struct urb *urb)
{
	int err;

	if (!urb || !urb->setup_packet)
		return 0;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return 0;

	if (is_clear_halt_cmd(urb))
		/* tweak clear_halt */
		err = tweak_clear_halt_cmd(urb);

	else if (is_set_interface_cmd(urb))
		/* tweak set_interface */
		err = tweak_set_interface_cmd(urb);

	else if (is_set_configuration_cmd(urb))
		/* tweak set_configuration */
		err = tweak_set_configuration_cmd(urb);

	else if (is_reset_device_cmd(urb))
		err = tweak_reset_device_cmd(urb);
	else {
		usbip_dbg_stub_rx("no need to tweak\n");
		return 0;
	}

	return !err;
}

/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process coming urbs.  Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret, i;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		/*
		 * This matched urb is not completed yet (i.e., it is still
		 * in flight in the usb hcd hardware/driver). Now we are
		 * cancelling it. The unlinking flag means that we are
		 * now not going to return the normal result pdu of a
		 * submission request, but going to return a result pdu
		 * of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * In the case that the unlinking flag is on, priv->seqnum
		 * is changed from the seqnum of the cancelled urb to
		 * the seqnum of the unlink request. This will be used
		 * to make the result pdu of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is now out of spinlocking to avoid
		 * spinlock recursion since stub_complete() is
		 * sometimes called in this context but not in the
		 * interrupt context. If stub_complete() is executed
		 * before we call usb_unlink_urb(), usb_unlink_urb()
		 * will return an error value. In this case, stub_tx
		 * will return the result pdu of this unlink request
		 * though submission is completed and actual unlinking
		 * is not executed. OK?
		 */
		/*
		 * In the above case, urb->status is not -ECONNRESET,
		 * so a driver in a client host will know the failure
		 * of the unlink request.
		 */
		for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
			ret = usb_unlink_urb(priv->urbs[i]);
			if (ret != -EINPROGRESS)
				dev_err(&priv->urbs[i]->dev->dev,
					"failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
					i + 1, priv->num_urbs,
					priv->seqnum, ret);
		}
		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in the priv_init queue.
	 * It was already completed and its result was going to be sent by a
	 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
	 * return the completeness of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
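
/*
 * A pdu is valid only when it addresses the exported device (devid
 * matches) and the device is still in the SDEV_ST_USED state.
 */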
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usbip_device *ud = &sdev->ud;
	int valid = 0;

	if (pdu->base.devid == sdev->devid) {
		spin_lock_irq(&ud->lock);
		if (ud->status == SDEV_ST_USED) {
			/* A request is valid. */
			valid = 1;
		}
		spin_unlock_irq(&ud->lock);
	}

	return valid;
}

static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	unsigned long flags;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv) {
		dev_err(&sdev->udev->dev, "alloc stub_priv\n");
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return NULL;
	}

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * After a stub_priv is linked to a list_head,
	 * our error handler can free allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return priv;
}
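
/*
 * Translate the endpoint number and direction carried in the pdu into a
 * pipe for the exported device, based on the endpoint's transfer type.
 * Returns -1 for an invalid endpoint or an invalid isoc packet count.
 */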
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd = NULL;
	int epnum = pdu->base.ep;
	int dir = pdu->base.direction;

	if (epnum < 0 || epnum > 15)
		goto err_ret;

	if (dir == USBIP_DIR_IN)
		ep = udev->ep_in[epnum & 0x7f];
	else
		ep = udev->ep_out[epnum & 0x7f];
	if (!ep)
		goto err_ret;

	epd = &ep->desc;

	if (usb_endpoint_xfer_control(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndctrlpipe(udev, epnum);
		else
			return usb_rcvctrlpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_bulk(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndbulkpipe(udev, epnum);
		else
			return usb_rcvbulkpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_int(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndintpipe(udev, epnum);
		else
			return usb_rcvintpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_isoc(epd)) {
		/* validate number of packets */
		if (pdu->u.cmd_submit.number_of_packets < 0 ||
		    pdu->u.cmd_submit.number_of_packets >
		    USBIP_MAX_ISO_PACKETS) {
			dev_err(&sdev->udev->dev,
				"CMD_SUBMIT: isoc invalid num packets %d\n",
				pdu->u.cmd_submit.number_of_packets);
			return -1;
		}
		if (dir == USBIP_DIR_OUT)
			return usb_sndisocpipe(udev, epnum);
		else
			return usb_rcvisocpipe(udev, epnum);
	}

err_ret:
	/* epnum out of range, no such endpoint, or unknown transfer type */
	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
	return -1;
}
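
/*
 * Sanitize urb->transfer_flags received from the client: keep only the
 * flags that are valid for this endpoint's transfer type and direction,
 * and mask everything else out.
 */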
static void masking_bogus_flags(struct urb *urb)
{
	int xfertype;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			 !setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		fallthrough;
	default:		/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;
}
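
/*
 * Receive the transfer buffer data, if any, for every urb of this
 * request from the tcp socket.
 */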
static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
{
	int ret;
	int i;

	for (i = 0; i < priv->num_urbs; i++) {
		ret = usbip_recv_xbuff(ud, priv->urbs[i]);
		if (ret < 0)
			break;
	}

	return ret;
}
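
/*
 * Handle a CMD_SUBMIT pdu: allocate a stub_priv and urb(s), receive the
 * transfer buffer from the client, and submit to the exported device.
 * A request is split into multiple urbs only when the client sent an SG
 * list but the server's HCD does not support SG.
 */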
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	struct scatterlist *sgl = NULL, *sg;
	void *buffer = NULL;
	unsigned long long buf_len;
	int nents;
	int num_urbs = 1;
	int pipe = get_pipe(sdev, pdu);
	int use_sg = pdu->u.cmd_submit.transfer_flags & USBIP_URB_DMA_MAP_SG;
	int support_sg = 1;
	int np = 0;
	int ret, i;
	int is_tweaked;

	if (pipe == -1)
		return;

	/*
	 * Smatch reported the error case where use_sg is true and buf_len is
	 * zero. In this case, add SDEV_EVENT_ERROR_MALLOC so that stub_priv
	 * is released by the stub event handler and the connection is shut
	 * down.
	 */
	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;

	if (use_sg && !buf_len) {
		dev_err(&udev->dev, "sg buffer with zero length\n");
		goto err_malloc;
	}

	/* allocate urb transfer buffer, if needed */
	if (buf_len) {
		if (use_sg) {
			sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
			if (!sgl)
				goto err_malloc;

			/* Check if the server's HCD supports SG */
			if (!udev->bus->sg_tablesize) {
				/*
				 * If the server's HCD doesn't support SG, break
				 * a single SG request into several URBs and map
				 * each SG list entry to the corresponding URB
				 * buffer. The previously allocated SG list is
				 * stored in priv->sgl (if the server's HCD
				 * supports SG, the SG list is stored only in
				 * urb->sg) and it is used as an indicator that
				 * the server split a single SG request into
				 * several URBs. Later, priv->sgl is used by
				 * stub_complete() and stub_send_ret_submit() to
				 * reassemble the divided URBs.
				 */
				support_sg = 0;
				num_urbs = nents;
				priv->completed_urbs = 0;
				pdu->u.cmd_submit.transfer_flags &=
						~USBIP_URB_DMA_MAP_SG;
			}
		} else {
			buffer = kzalloc(buf_len, GFP_KERNEL);
			if (!buffer)
				goto err_malloc;
		}
	}

	/* allocate urb array */
	priv->num_urbs = num_urbs;
	priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
	if (!priv->urbs)
		goto err_urbs;

	/* setup a urb */
	if (support_sg) {
		if (usb_pipeisoc(pipe))
			np = pdu->u.cmd_submit.number_of_packets;

		priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
		if (!priv->urbs[0])
			goto err_urb;

		if (buf_len) {
			if (use_sg) {
				priv->urbs[0]->sg = sgl;
				priv->urbs[0]->num_sgs = nents;
				priv->urbs[0]->transfer_buffer = NULL;
			} else {
				priv->urbs[0]->transfer_buffer = buffer;
			}
		}

		/* copy urb setup packet */
		priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
						      8, GFP_KERNEL);
		if (!priv->urbs[0]->setup_packet) {
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}

		usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
	} else {
		for_each_sg(sgl, sg, nents, i) {
			priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
			/* The URBs which were previously allocated will be
			 * freed in stub_device_cleanup_urbs() if an error
			 * occurs.
			 */
			if (!priv->urbs[i])
				goto err_urb;

			usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
			priv->urbs[i]->transfer_buffer = sg_virt(sg);
			priv->urbs[i]->transfer_buffer_length = sg->length;
		}
		priv->sgl = sgl;
	}

	for (i = 0; i < num_urbs; i++) {
		/* set other members from the base header of pdu */
		priv->urbs[i]->context = (void *) priv;
		priv->urbs[i]->dev = udev;
		priv->urbs[i]->pipe = pipe;
		priv->urbs[i]->complete = stub_complete;

		/*
		 * all URBs belong to a single PDU, so a global is_tweaked
		 * flag is enough
		 */
		is_tweaked = tweak_special_requests(priv->urbs[i]);

		masking_bogus_flags(priv->urbs[i]);
	}

	if (stub_recv_xbuff(ud, priv) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
		return;

	/* urb is now ready to submit */
	for (i = 0; i < priv->num_urbs; i++) {
		if (!is_tweaked) {
			ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);

			if (ret == 0)
				usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
						  pdu->base.seqnum);
			else {
				dev_err(&udev->dev, "submit_urb error, %d\n", ret);
				usbip_dump_header(pdu);
				usbip_dump_urb(priv->urbs[i]);

				/*
				 * Pessimistic.
				 * This connection will be discarded.
				 */
				usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
				break;
			}
		} else {
			/*
			 * An identical URB was already submitted in
			 * tweak_special_requests(). Skip submitting this URB
			 * to not duplicate the request.
			 */
			priv->urbs[i]->status = 0;
			stub_complete(priv->urbs[i]);
		}
	}

	usbip_dbg_stub_rx("Leave\n");
	return;

err_urb:
	kfree(priv->urbs);
err_urbs:
	kfree(buffer);
	sgl_free(sgl);
err_malloc:
	usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
}

/* recv a pdu */
static void stub_rx_pdu(struct usbip_device *ud)
{
	int ret;
	struct usbip_header pdu;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
	struct device *dev = &sdev->udev->dev;

	usbip_dbg_stub_rx("Enter\n");

	memset(&pdu, 0, sizeof(pdu));

	/* receive a pdu header */
	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
	if (ret != sizeof(pdu)) {
		dev_err(dev, "recv a header, %d\n", ret);
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	usbip_header_correct_endian(&pdu, 0);
	if (usbip_dbg_flag_stub_rx)
		usbip_dump_header(&pdu);

	if (!valid_request(sdev, &pdu)) {
		dev_err(dev, "recv invalid request\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	switch (pdu.base.command) {
	case USBIP_CMD_UNLINK:
		stub_recv_cmd_unlink(sdev, &pdu);
		break;

	case USBIP_CMD_SUBMIT:
		stub_recv_cmd_submit(sdev, &pdu);
		break;

	default:
		/* NOTREACHED */
		dev_err(dev, "unknown pdu\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		break;
	}
}
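
/*
 * Body of the rx kthread: keep receiving pdus until the kthread is
 * stopped or a usbip event is raised.
 */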
int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		stub_rx_pdu(ud);
	}

	return 0;
}