xfrm_device.c

/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
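/*
 * validate_xmit_xfrm - called from the core transmit path for packets
 * that carry an xfrm offload context. Packets that the device cannot
 * handle in hardware (e.g. after a reroute to a different device) get
 * the ESP transformation applied in software here, with GSO packets
 * segmented first when required.
 */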
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	struct softnet_data *sd;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	x = skb->sp->xvec[skb->sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;
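
	/*
	 * If the per-CPU xfrm backlog still holds packets from an earlier
	 * asynchronous resume, ask the caller to requeue this skb so that
	 * packet ordering is preserved.
	 */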
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	xo->flags |= XFRM_XMIT;

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(x->xso.dev != dev)) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}
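
	/* Fast path: a single skb rather than a list of GSO segments. */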
	if (!skb->next) {
		x->outer_mode->xmit(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}
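
	/* Segment list: transform each segment individually. */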
	skb2 = skb;

	do {
		struct sk_buff *nskb = skb2->next;
		skb2->next = NULL;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		x->outer_mode->xmit(x, skb2);
		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;

			if (!skb)
				return NULL;

			goto skip_push;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
		skb2 = nskb;
	} while (skb2);

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
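
/*
 * xfrm_dev_state_add - set up hardware offload for a new xfrm state.
 * Resolves the target netdevice (from xuo->ifindex, or via a route
 * lookup on the state's addresses), checks that its driver provides
 * the required xfrmdev_ops callbacks, and asks the driver to install
 * the state.
 */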
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
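
		/*
		 * A failed route lookup is not fatal here: returning 0
		 * lets the state be created without hardware offload.
		 */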
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}
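
	/* ESN offload additionally requires the advance_esn callback. */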
	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->dev = NULL;
		dev_put(dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
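
/*
 * xfrm_dev_offload_ok - decide whether this skb may be handed to the
 * offloading device as-is. The packet must fit the path MTU (or be a
 * GSO packet whose segments do), and the driver gets the final say
 * through its xdo_dev_offload_ok callback.
 */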
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm && x->type->get_mtu)) {
		mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
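
/*
 * xfrm_dev_resume - resume transmission of an skb after asynchronous
 * crypto processing has completed. If the TX queue is frozen or
 * stopped, the skb is parked on the per-CPU xfrm backlog and the TX
 * softirq is raised to retry later.
 */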
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
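
/*
 * xfrm_dev_backlog - run from the TX softirq to flush skbs that
 * xfrm_dev_resume() parked on the per-CPU backlog. The backlog is
 * spliced onto a private list first so the queue lock is held only
 * briefly.
 */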
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif
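
/*
 * xfrm_api_check - sanity-check a device's advertised ESP features
 * against the callbacks it actually provides. A device must not claim
 * NETIF_F_HW_ESP without state add/delete ops, nor ESP TX checksum
 * offload without NETIF_F_HW_ESP itself.
 */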
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}
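
/*
 * For illustration only (hypothetical driver code, not part of this
 * file): a driver that sets NETIF_F_HW_ESP is expected to wire up at
 * least the callbacks checked above, roughly like:
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	= foo_xdo_dev_state_add,
 *		.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
 *		.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
 *	};
 *
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP;
 */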
static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}
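
/*
 * Netdevice notifier: validate offload feature/ops consistency on
 * register and feature change, and flush offloaded states when the
 * device goes down or unregisters.
 */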
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}