netback.c

/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 * drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots a skb can have. If a guest sends
 * a skb which exceeds this limit, it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128
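
/* Illustrative example of the copy/map split above: for a 1500-byte
 * first slot, the first 128 bytes are grant-copied into the skb's
 * linear area and the remaining 1372 bytes are grant-mapped as the
 * first frag; a first slot of 128 bytes or fewer is copied in full
 * and needs no mapping at all.
 */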

/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st);
static void push_tx_responses(struct xenvif_queue *queue);

static inline int tx_work_todo(struct xenvif_queue *queue);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);

	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}
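
/* Note on the pointer arithmetic above: the ubuf_info is embedded in
 * pending_tx_info[pending_idx], so stepping back pending_idx elements
 * lands on pending_tx_info[0], from which container_of() recovers the
 * queue itself.
 */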

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
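
/* The mask above is only a valid modulo operation while
 * MAX_PENDING_REQS is a power of two; it reduces the free-running
 * producer/consumer indices to ring slots without a division.
 */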

void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
	else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
				     &queue->eoi_pending) &
		 (NETBK_TX_EOI | NETBK_COMMON_EOI))
		xen_irq_lateeoi(queue->tx_irq, 0);
}

static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = max(131072UL, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
	queue->remaining_credit = min(max_credit, max_burst);

	queue->rate_limited = false;
}
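
/* Worked example with illustrative numbers: given credit_bytes = 60000
 * and remaining_credit = 10000, a refill sets remaining_credit to
 * min(10000 + 60000, max(131072, 60000)) = 70000; the 128kB burst cap
 * only bites once the accumulated credit would exceed it (or
 * credit_bytes, if that is larger).
 */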

void xenvif_tx_credit_callback(struct timer_list *t)
{
	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);

	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}

static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp,
			  unsigned int extra_count, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
		push_tx_responses(queue);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
		RING_COPY_REQUEST(&queue->tx, cons++, txp);
		extra_count = 0; /* only the first frag can have extras */
	} while (1);
	queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->num_queues)
		xenvif_kick_thread(&vif->queues[0]);
}

static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 unsigned int extra_count,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * is considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* The Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, extra_count, cons + slots);
		return drop_err;
	}

	return slots;
}
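
/* Return-value summary for xenvif_count_requests(): 0 when the first
 * slot carries the whole packet; a positive count of the extra slots
 * copied into txp[] on success; a negative errno either after the
 * dropped packet's slots have been consumed and errored back to the
 * frontend, or after xenvif_fatal_tx_err() has disabled the vif.
 */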

struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   unsigned int extra_count,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
	queue->pending_tx_info[pending_idx].extra_count = extra_count;
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}

static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop,
							unsigned int frag_overflow,
							struct sk_buff *nskb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots;

	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
						gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}
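
/* In the overflow case above, slots beyond MAX_SKB_FRAGS spill into a
 * zero-length companion skb (nskb) hung off the first skb's frag_list;
 * those foreign frags are later coalesced back into local pages by
 * xenvif_handle_frag_list() before the packet enters the stack.
 */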

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);
		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header hasn't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}
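
/* Error-handling summary for xenvif_tx_check_gop(): the first failure
 * (header copy or any frag map) is reported to the frontend as an
 * error; every other slot of the skb (and of a frag_list skb) is
 * unmapped if needed and released as OKAY, with the shared
 * header/first-frag slot treated specially so it is released exactly
 * once.
 */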

static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);
		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}

static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     unsigned int *extra_count,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		RING_COPY_REQUEST(&queue->tx, cons, &extra);

		queue->tx.req_cons = ++cons;
		(*extra_count)++;

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout)) {
		queue->rate_limited = true;
		return true;
	}

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;
		queue->rate_limited = true;

		return true;
	}

	return false;
}
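
/* Credit-window sketch with illustrative numbers: with credit_usec =
 * 50000 the replenish window is 50 ms. A packet that still exceeds
 * remaining_credit inside the window arms credit_timeout for the
 * window boundary and marks the queue rate_limited; the timer then
 * refills credit and re-kicks NAPI via xenvif_tx_credit_callback().
 */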

/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Too many multicast addresses\n");
		return -ENOSPC;
	}

	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return -ENOMEM;

	ether_addr_copy(mcast->addr, addr);
	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
	vif->fe_mcast_count++;

	return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			--vif->fe_mcast_count;
			list_del_rcu(&mcast->entry);
			kfree_rcu(mcast, rcu);
			break;
		}
	}
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	rcu_read_lock();
	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
	/* No need for locking or RCU here. NAPI poll and TX queue
	 * are stopped.
	 */
	while (!list_empty(&vif->fe_mcast_addr)) {
		struct xenvif_mcast_addr *mcast;

		mcast = list_first_entry(&vif->fe_mcast_addr,
					 struct xenvif_mcast_addr,
					 entry);
		--vif->fe_mcast_count;
		list_del(&mcast->entry);
		kfree(mcast);
	}
}

static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
	struct sk_buff *skb, *nskb;
	int ret;
	unsigned int frag_overflow;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		unsigned int extra_count;
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		RING_COPY_REQUEST(&queue->tx, idx, &txreq);

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		extra_count = 0;
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       &extra_count,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 (ret == 0) ?
					 XEN_NETIF_RSP_OKAY :
					 XEN_NETIF_RSP_ERROR);
			push_tx_responses(queue);
			continue;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 XEN_NETIF_RSP_OKAY);
			push_tx_responses(queue);
			continue;
		}

		ret = xenvif_count_requests(queue, &txreq, extra_count,
					    txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %u, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size)
			skb_shinfo(skb)->nr_frags++;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */
		frag_overflow = 0;
		nskb = NULL;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = xenvif_alloc_skb(0);
			if (unlikely(nskb == NULL)) {
				skb_shinfo(skb)->nr_frags = 0;
				kfree_skb(skb);
				xenvif_tx_err(queue, &txreq, extra_count, idx);
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				skb_shinfo(skb)->nr_frags = 0;
				kfree_skb(skb);
				kfree_skb(nskb);
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
			struct xen_netif_extra_info *extra;
			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

			switch (extra->u.hash.type) {
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
				type = PKT_HASH_TYPE_L3;
				break;

			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
				type = PKT_HASH_TYPE_L4;
				break;

			default:
				break;
			}

			if (type != PKT_HASH_TYPE_NONE)
				skb_set_hash(skb,
					     *(u32 *)extra->u.hash.value,
					     type);
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_gfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data) & ~XEN_PAGE_MASK;

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		if (data_len < txreq.size) {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
						extra_count, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req,
			       &txreq, sizeof(txreq));
			queue->pending_tx_info[pending_idx].extra_count =
				extra_count;
		}

		queue->pending_cons++;

		gop = xenvif_get_requests(queue, skb, txfrags, gop,
					  frag_overflow, nskb);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}
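
/* Per-packet grant-op layout produced above: exactly one grant copy
 * for the linear header (at most XEN_NETBACK_TX_COPY_LEN bytes) plus
 * one grant map per remaining slot; xenvif_tx_action() issues the
 * copies and maps as separate batches before xenvif_tx_submit()
 * inspects the results.
 */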

/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 on success, or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);

	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;

			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;

		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}

static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
			xenvif_skb_zerocopy_prepare(queue, nskb);
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
			/* Copied all the bits from the frag list -- free it. */
			skb_frag_list_init(skb);
			kfree_skb(nskb);
		}

		skb->dev = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}

void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}
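
/* The loop above walks the chain built by xenvif_fill_frags(): each
 * callback_struct's ctx points at the next pending slot's ubuf_info,
 * so a single callback retires every slot of the skb onto the dealloc
 * ring in one pass.
 */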

static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}

/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		if (ret) {
			unsigned int i;

			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
				   nr_mops, ret);
			for (i = 0; i < nr_mops; ++i)
				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
					     GNTST_okay);
		}
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}
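
/* Ordering note for xenvif_tx_action(): if xenvif_tx_build_gops()
 * produced no copy ops there is nothing to submit, so the function
 * returns early; otherwise the header copies and frag maps are issued
 * as two separate grant-table batches and xenvif_tx_submit() checks
 * both result arrays before handing skbs to the stack.
 */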

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req,
			 pending_tx_info->extra_count, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id = txp->id;
	resp->status = st;

	while (extra_count-- != 0)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}
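
/* Note: every extra-info segment consumed from the request ring must
 * also be accounted for on the response ring, so make_tx_response()
 * pads with XEN_NETIF_RSP_NULL entries to keep the two rings in step.
 */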

static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}

void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}

int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_data_rings(queue);
	return err;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}

static void make_ctrl_response(struct xenvif *vif,
			       const struct xen_netif_ctrl_request *req,
			       u32 status, u32 data)
{
	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
	struct xen_netif_ctrl_response rsp = {
		.id = req->id,
		.type = req->type,
		.status = status,
		.data = data,
	};

	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
	vif->ctrl.rsp_prod_pvt = ++idx;
}

static void push_ctrl_response(struct xenvif *vif)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
	if (notify)
		notify_remote_via_irq(vif->ctrl_irq);
}

static void process_ctrl_request(struct xenvif *vif,
				 const struct xen_netif_ctrl_request *req)
{
	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
	u32 data = 0;

	switch (req->type) {
	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
		status = xenvif_set_hash_alg(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
		status = xenvif_get_hash_flags(vif, &data);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
		status = xenvif_set_hash_flags(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
		status = xenvif_set_hash_key(vif, req->data[0],
					     req->data[1]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
		status = xenvif_set_hash_mapping_size(vif,
						      req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
		status = xenvif_set_hash_mapping(vif, req->data[0],
						 req->data[1],
						 req->data[2]);
		break;

	default:
		break;
	}

	make_ctrl_response(vif, req, status, data);
	push_ctrl_response(vif);
}

static void xenvif_ctrl_action(struct xenvif *vif)
{
	for (;;) {
		RING_IDX req_prod, req_cons;

		req_prod = vif->ctrl.sring->req_prod;
		req_cons = vif->ctrl.req_cons;

		/* Make sure we can see requests before we process them. */
		rmb();

		if (req_cons == req_prod)
			break;

		while (req_cons != req_prod) {
			struct xen_netif_ctrl_request req;

			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
			req_cons++;

			process_ctrl_request(vif, &req);
		}

		vif->ctrl.req_cons = req_cons;
		vif->ctrl.sring->req_event = req_cons + 1;
	}
}

static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
		return true;

	return false;
}

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
	struct xenvif *vif = data;
	unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;

	while (xenvif_ctrl_work_todo(vif)) {
		xenvif_ctrl_action(vif);
		eoi_flag = 0;
	}

	xen_irq_lateeoi(irq, eoi_flag);

	return IRQ_HANDLED;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs, but at most 8 if the
	 * user has not specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");