// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 */

#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>

#include <net/dst.h>
#include <net/xfrm.h>
#include <net/rtnetlink.h>

#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

struct internal_dev {
	struct vport *vport;
};

static struct vport_ops ovs_internal_vport_ops;

static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
	return netdev_priv(netdev);
}

/* Called with rcu_read_lock_bh. */
static netdev_tx_t
internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int len, err;

	/* store len value because skb can be freed inside ovs_vport_receive() */
	len = skb->len;

	rcu_read_lock();
	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();

	if (likely(!err))
		dev_sw_netstats_tx_add(netdev, 1, len);
	else
		netdev->stats.tx_errors++;

	return NETDEV_TX_OK;
}

static int internal_dev_open(struct net_device *netdev)
{
	netif_start_queue(netdev);
	return 0;
}

static int internal_dev_stop(struct net_device *netdev)
{
	netif_stop_queue(netdev);
	return 0;
}

static void internal_dev_getinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "openvswitch", sizeof(info->driver));
}

static const struct ethtool_ops internal_dev_ethtool_ops = {
	.get_drvinfo	= internal_dev_getinfo,
	.get_link	= ethtool_op_get_link,
};
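
/* priv_destructor for registered internal devices: free_netdev() calls this
 * once the device has been torn down, releasing the vport allocated
 * alongside it.
 */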
static void internal_dev_destructor(struct net_device *dev)
{
	struct vport *vport = ovs_internal_dev_get_vport(dev);

	ovs_vport_free(vport);
}

static const struct net_device_ops internal_dev_netdev_ops = {
	.ndo_open = internal_dev_open,
	.ndo_stop = internal_dev_stop,
	.ndo_start_xmit = internal_dev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
};

static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
	.kind = "openvswitch",
};
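
/* alloc_netdev() setup callback: configure the internal port as an Ethernet
 * device with OVS-specific flags and its checksum/GSO feature set.
 */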
static void do_setup(struct net_device *netdev)
{
	ether_setup(netdev);
	netdev->max_mtu = ETH_MAX_MTU;

	netdev->netdev_ops = &internal_dev_netdev_ops;

	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
			      IFF_NO_QUEUE;
	netdev->lltx = true;
	netdev->needs_free_netdev = true;
	netdev->priv_destructor = NULL;
	netdev->ethtool_ops = &internal_dev_ethtool_ops;
	netdev->rtnl_link_ops = &internal_dev_link_ops;

	netdev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			   NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE |
			   NETIF_F_GSO_ENCAP_ALL;

	netdev->vlan_features = netdev->features;
	netdev->hw_enc_features = netdev->features;
	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netdev->hw_features = netdev->features;

	eth_hw_addr_random(netdev);
}
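
/* Create an internal vport: allocate the vport and its backing net_device,
 * link the two through the device's private area, then register the device,
 * enable promiscuous mode, and start its transmit queue.
 */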
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct internal_dev *internal_dev;
	struct net_device *dev;
	int err;

	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	dev = alloc_netdev(sizeof(struct internal_dev),
			   parms->name, NET_NAME_USER, do_setup);
	vport->dev = dev;
	if (!vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
	dev->ifindex = parms->desired_ifindex;
	internal_dev = internal_dev_priv(vport->dev);
	internal_dev->vport = vport;

	/* Restrict bridge port to current netns. */
	if (vport->port_no == OVSP_LOCAL)
		vport->dev->netns_local = true;

	rtnl_lock();
	err = register_netdevice(vport->dev);
	if (err)
		goto error_unlock;
	vport->dev->priv_destructor = internal_dev_destructor;
	dev_set_promiscuity(vport->dev, 1);
	rtnl_unlock();
	netif_start_queue(vport->dev);

	return vport;

error_unlock:
	rtnl_unlock();
	free_netdev(dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}

static void internal_dev_destroy(struct vport *vport)
{
	netif_stop_queue(vport->dev);
	rtnl_lock();
	dev_set_promiscuity(vport->dev, -1);

	/* unregister_netdevice() waits for an RCU grace period. */
	unregister_netdevice(vport->dev);

	rtnl_unlock();
}
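
/* .send handler for internal vports: packets the datapath directs to this
 * port are delivered to the local network stack via netif_rx().
 */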
static int internal_dev_recv(struct sk_buff *skb)
{
	struct net_device *netdev = skb->dev;

	if (unlikely(!(netdev->flags & IFF_UP))) {
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	skb_dst_drop(skb);
	nf_reset_ct(skb);
	secpath_reset(skb);

	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	dev_sw_netstats_rx_add(netdev, skb->len);

	netif_rx(skb);

	return NETDEV_TX_OK;
}

static struct vport_ops ovs_internal_vport_ops = {
	.type		= OVS_VPORT_TYPE_INTERNAL,
	.create		= internal_dev_create,
	.destroy	= internal_dev_destroy,
	.send		= internal_dev_recv,
};

int ovs_is_internal_dev(const struct net_device *netdev)
{
	return netdev->netdev_ops == &internal_dev_netdev_ops;
}

struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
{
	if (!ovs_is_internal_dev(netdev))
		return NULL;

	return internal_dev_priv(netdev)->vport;
}
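
/* Register the "openvswitch" rtnl link ops together with the internal vport
 * ops; undo the link ops registration if registering the vport ops fails.
 */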
int ovs_internal_dev_rtnl_link_register(void)
{
	int err;

	err = rtnl_link_register(&internal_dev_link_ops);
	if (err < 0)
		return err;

	err = ovs_vport_ops_register(&ovs_internal_vport_ops);
	if (err < 0)
		rtnl_link_unregister(&internal_dev_link_ops);

	return err;
}

void ovs_internal_dev_rtnl_link_unregister(void)
{
	ovs_vport_ops_unregister(&ovs_internal_vport_ops);
	rtnl_link_unregister(&internal_dev_link_ops);
}