/* vport.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2007-2014 Nicira, Inc.
  4. */
  5. #include <linux/etherdevice.h>
  6. #include <linux/if.h>
  7. #include <linux/if_vlan.h>
  8. #include <linux/jhash.h>
  9. #include <linux/kernel.h>
  10. #include <linux/list.h>
  11. #include <linux/mutex.h>
  12. #include <linux/percpu.h>
  13. #include <linux/rcupdate.h>
  14. #include <linux/rtnetlink.h>
  15. #include <linux/compat.h>
  16. #include <net/net_namespace.h>
  17. #include <linux/module.h>
  18. #include "datapath.h"
  19. #include "vport.h"
  20. #include "vport-internal_dev.h"
/* List of registered vport type implementations.
 * Mutated only under ovs_lock()/ovs_unlock() (see __ovs_vport_ops_register).
 */
static LIST_HEAD(vport_ops_list);

/* Hash table mapping (net namespace, device name) -> vport.
 * Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;

/* Bucket count for dev_table; must stay a power of two because lookups
 * mask the hash with (VPORT_HASH_BUCKETS - 1). */
#define VPORT_HASH_BUCKETS 1024
  25. /**
  26. * ovs_vport_init - initialize vport subsystem
  27. *
  28. * Called at module load time to initialize the vport subsystem.
  29. */
  30. int ovs_vport_init(void)
  31. {
  32. dev_table = kcalloc(VPORT_HASH_BUCKETS, sizeof(struct hlist_head),
  33. GFP_KERNEL);
  34. if (!dev_table)
  35. return -ENOMEM;
  36. return 0;
  37. }
/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 * Frees the name-lookup hash table allocated by ovs_vport_init().
 * NOTE(review): presumably all vports have been removed by this point;
 * nothing here enforces that — confirm with the module exit path.
 */
void ovs_vport_exit(void)
{
	kfree(dev_table);
}
  47. static struct hlist_head *hash_bucket(const struct net *net, const char *name)
  48. {
  49. unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
  50. return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
  51. }
  52. int __ovs_vport_ops_register(struct vport_ops *ops)
  53. {
  54. int err = -EEXIST;
  55. struct vport_ops *o;
  56. ovs_lock();
  57. list_for_each_entry(o, &vport_ops_list, list)
  58. if (ops->type == o->type)
  59. goto errout;
  60. list_add_tail(&ops->list, &vport_ops_list);
  61. err = 0;
  62. errout:
  63. ovs_unlock();
  64. return err;
  65. }
  66. EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
/* Unregister a vport type implementation previously added with
 * __ovs_vport_ops_register(). Takes ovs_mutex for the list removal.
 * NOTE(review): presumably no vports of this type may still exist when
 * this is called — confirm against the callers' teardown order.
 */
void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
  74. /**
  75. * ovs_vport_locate - find a port that has already been created
  76. *
  77. * @net: network namespace
  78. * @name: name of port to find
  79. *
  80. * Must be called with ovs or RCU read lock.
  81. */
  82. struct vport *ovs_vport_locate(const struct net *net, const char *name)
  83. {
  84. struct hlist_head *bucket = hash_bucket(net, name);
  85. struct vport *vport;
  86. hlist_for_each_entry_rcu(vport, bucket, hash_node,
  87. lockdep_ovsl_is_held())
  88. if (!strcmp(name, ovs_vport_name(vport)) &&
  89. net_eq(ovs_dp_get_net(vport->dp), net))
  90. return vport;
  91. return NULL;
  92. }
/**
 * ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: information about new vport.
 *
 * Allocate and initialize a new vport defined by @ops. The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv(). Some parameters of the vport will be initialized from @parms.
 * Vports that are no longer needed should be released with
 * vport_free().
 *
 * Returns the new vport or an ERR_PTR (-ENOMEM on allocation failure,
 * -EINVAL if the upcall portids in @parms cannot be applied).
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		/* Pad so the private area following the struct is aligned
		 * the way vport_priv() expects. */
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	/* Per-CPU upcall success/failure counters, read by
	 * ovs_vport_get_upcall_stats(). */
	vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
	if (!vport->upcall_stats) {
		err = -ENOMEM;
		goto err_kfree_vport;
	}

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
		err = -EINVAL;
		goto err_free_percpu;
	}

	return vport;

	/* Unwind in reverse order of acquisition. */
err_free_percpu:
	free_percpu(vport->upcall_stats);
err_kfree_vport:
	kfree(vport);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);
/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from an RCU callback or an error path, so no RCU
	 * reader can still hold a reference; a raw dereference of the
	 * upcall_portids pointer is therefore safe here.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	free_percpu(vport->upcall_stats);
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);
  161. static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
  162. {
  163. struct vport_ops *ops;
  164. list_for_each_entry(ops, &vport_ops_list, list)
  165. if (ops->type == parms->type)
  166. return ops;
  167. return NULL;
  168. }
/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type). ovs_mutex must be held.
 *
 * Returns the new vport on success or an ERR_PTR on failure; -EAGAIN is
 * returned when a matching vport module has just been loaded and the caller
 * must restart the port-addition workflow.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		/* Pin the implementing module for the vport's lifetime;
		 * dropped again in ovs_vport_del() or on create failure. */
		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		/* Publish the vport in the name-lookup table used by
		 * ovs_vport_locate(). */
		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     ovs_vport_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}
  208. /**
  209. * ovs_vport_set_options - modify existing vport device (for kernel callers)
  210. *
  211. * @vport: vport to modify.
  212. * @options: New configuration.
  213. *
  214. * Modifies an existing device with the specified configuration (which is
  215. * dependent on device type). ovs_mutex must be held.
  216. */
  217. int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
  218. {
  219. if (!vport->ops->set_options)
  220. return -EOPNOTSUPP;
  221. return vport->ops->set_options(vport, options);
  222. }
/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it. ovs_mutex must
 * be held.
 */
void ovs_vport_del(struct vport *vport)
{
	/* Unlink first so new RCU lookups via ovs_vport_locate() can no
	 * longer find this vport. */
	hlist_del_rcu(&vport->hash_node);
	/* Drop the module reference taken in ovs_vport_add().
	 * NOTE(review): destroy presumably defers the actual free past an
	 * RCU grace period — confirm in the per-type implementations. */
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}
  237. /**
  238. * ovs_vport_get_stats - retrieve device stats
  239. *
  240. * @vport: vport from which to retrieve the stats
  241. * @stats: location to store stats
  242. *
  243. * Retrieves transmit, receive, and error stats for the given device.
  244. *
  245. * Must be called with ovs_mutex or rcu_read_lock.
  246. */
  247. void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
  248. {
  249. const struct rtnl_link_stats64 *dev_stats;
  250. struct rtnl_link_stats64 temp;
  251. dev_stats = dev_get_stats(vport->dev, &temp);
  252. stats->rx_errors = dev_stats->rx_errors;
  253. stats->tx_errors = dev_stats->tx_errors;
  254. stats->tx_dropped = dev_stats->tx_dropped;
  255. stats->rx_dropped = dev_stats->rx_dropped;
  256. stats->rx_bytes = dev_stats->rx_bytes;
  257. stats->rx_packets = dev_stats->rx_packets;
  258. stats->tx_bytes = dev_stats->tx_bytes;
  259. stats->tx_packets = dev_stats->tx_packets;
  260. }
/**
 * ovs_vport_get_upcall_stats - retrieve upcall stats
 *
 * @vport: vport from which to retrieve the stats.
 * @skb: sk_buff where upcall stats should be appended.
 *
 * Retrieves upcall stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 *
 * Returns 0 on success or -EMSGSIZE if @skb lacks room for the nested
 * OVS_VPORT_ATTR_UPCALL_STATS attribute.
 */
int ovs_vport_get_upcall_stats(struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;
	int i;

	__u64 tx_success = 0;
	__u64 tx_fail = 0;

	/* Sum the per-CPU counters; the u64_stats fetch/retry loop rereads
	 * a CPU's pair if a writer updated it concurrently, so each pair is
	 * internally consistent. */
	for_each_possible_cpu(i) {
		const struct vport_upcall_stats_percpu *stats;
		unsigned int start;

		stats = per_cpu_ptr(vport->upcall_stats, i);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_success += u64_stats_read(&stats->n_success);
			tx_fail += u64_stats_read(&stats->n_fail);
		} while (u64_stats_fetch_retry(&stats->syncp, start));
	}

	nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_ATTR_SUCCESS, tx_success,
			      OVS_VPORT_ATTR_PAD)) {
		/* Roll back the partially built nest so @skb stays valid. */
		nla_nest_cancel(skb, nla);
		return -EMSGSIZE;
	}

	if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_ATTR_FAIL, tx_fail,
			      OVS_VPORT_ATTR_PAD)) {
		nla_nest_cancel(skb, nla);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nla);

	return 0;
}
  303. /**
  304. * ovs_vport_get_options - retrieve device options
  305. *
  306. * @vport: vport from which to retrieve the options.
  307. * @skb: sk_buff where options should be appended.
  308. *
  309. * Retrieves the configuration of the given device, appending an
  310. * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
  311. * vport-specific attributes to @skb.
  312. *
  313. * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
  314. * negative error code if a real error occurred. If an error occurs, @skb is
  315. * left unmodified.
  316. *
  317. * Must be called with ovs_mutex or rcu_read_lock.
  318. */
  319. int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
  320. {
  321. struct nlattr *nla;
  322. int err;
  323. if (!vport->ops->get_options)
  324. return 0;
  325. nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_OPTIONS);
  326. if (!nla)
  327. return -EMSGSIZE;
  328. err = vport->ops->get_options(vport, skb);
  329. if (err) {
  330. nla_nest_cancel(skb, nla);
  331. return err;
  332. }
  333. nla_nest_end(skb, nla);
  334. return 0;
  335. }
/**
 * ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32, or -ENOMEM on allocation failure.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	/* Payload must be a non-empty, whole array of u32 port ids. */
	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	/* struct has a flexible ids[] array; allocate it inline. */
	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	/* Precompute the reciprocal so ovs_vport_find_upcall_portid() can
	 * compute hash % n_ids without a division. */
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	/* Publish the fully initialized array before retiring the old one;
	 * concurrent RCU readers see either the old or the new array. */
	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
  367. /**
  368. * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
  369. *
  370. * @vport: vport from which to retrieve the portids.
  371. * @skb: sk_buff where portids should be appended.
  372. *
  373. * Retrieves the configuration of the given vport, appending the
  374. * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
  375. * portids to @skb.
  376. *
  377. * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
  378. * If an error occurs, @skb is left unmodified. Must be called with
  379. * ovs_mutex or rcu_read_lock.
  380. */
  381. int ovs_vport_get_upcall_portids(const struct vport *vport,
  382. struct sk_buff *skb)
  383. {
  384. struct vport_portids *ids;
  385. ids = rcu_dereference_ovsl(vport->upcall_portids);
  386. if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
  387. return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
  388. ids->n_ids * sizeof(u32), (void *)ids->ids);
  389. else
  390. return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
  391. }
/**
 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
 *
 * @vport: vport from which the missed packet is received.
 * @skb: skb that the missed packet was received.
 *
 * Uses the skb_get_hash() to select the upcall portid to send the
 * upcall.
 *
 * Returns the portid of the target socket. Must be called with rcu_read_lock.
 */
u32 ovs_vport_find_upcall_portid(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;
	u32 ids_index;
	u32 hash;

	ids = rcu_dereference(vport->upcall_portids);

	/* If there is only one portid, select it in the fast-path. */
	if (ids->n_ids == 1)
		return ids->ids[0];

	/* ids_index = hash % n_ids, computed as hash - n * (hash / n) with
	 * the division done by reciprocal multiplication (rn_ids was
	 * precomputed in ovs_vport_set_upcall_portids()). */
	hash = skb_get_hash(skb);
	ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
	return ids->ids[ids_index];
}
/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel (if any) that carried packet
 *
 * Must be called with rcu_read_lock. The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 *
 * Consumes @skb in all cases. Returns 0 on success or a negative errno
 * if flow key extraction failed (in which case the skb is freed).
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	OVS_CB(skb)->cutlen = 0;
	OVS_CB(skb)->probability = 0;

	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		/* Packet is crossing network namespaces: scrub namespace
		 * state but preserve the mark, and drop the tunnel metadata
		 * which is only meaningful in the originating namespace. */
		u32 mark;

		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}
  452. static int packet_length(const struct sk_buff *skb,
  453. struct net_device *dev)
  454. {
  455. int length = skb->len - dev->hard_header_len;
  456. if (!skb_vlan_tag_present(skb) &&
  457. eth_type_vlan(skb->protocol))
  458. length -= VLAN_HLEN;
  459. /* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
  460. * (ETH_LEN + VLAN_HLEN) in addition to the mtu value, but almost none
  461. * account for 802.1ad. e.g. is_skb_forwardable().
  462. */
  463. return length > 0 ? length : 0;
  464. }
/* Transmit @skb out @vport's underlying device.
 *
 * @mac_proto describes the L2 framing of @skb (MAC_PROTO_ETHERNET or
 * MAC_PROTO_NONE) and must be compatible with the device's ARP hardware
 * type; mismatches and over-MTU non-GSO packets are dropped.
 * Consumes @skb in all cases.
 */
void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
{
	int mtu = vport->dev->mtu;

	switch (vport->dev->type) {
	case ARPHRD_NONE:
		/* L3 device: an Ethernet-framed skb is wrapped as
		 * transparent Ethernet bridging (ETH_P_TEB). */
		if (mac_proto == MAC_PROTO_ETHERNET) {
			skb_reset_network_header(skb);
			skb_reset_mac_len(skb);
			skb->protocol = htons(ETH_P_TEB);
		} else if (mac_proto != MAC_PROTO_NONE) {
			WARN_ON_ONCE(1);
			goto drop;
		}
		break;
	case ARPHRD_ETHER:
		if (mac_proto != MAC_PROTO_ETHERNET)
			goto drop;
		break;
	default:
		goto drop;
	}

	/* GSO skbs may exceed the MTU; they are segmented later. */
	if (unlikely(packet_length(skb, vport->dev) > mtu &&
		     !skb_is_gso(skb))) {
		vport->dev->stats.tx_errors++;
		if (vport->dev->flags & IFF_UP)
			net_warn_ratelimited("%s: dropped over-mtu packet: "
					     "%d > %d\n", vport->dev->name,
					     packet_length(skb, vport->dev),
					     mtu);
		goto drop;
	}

	skb->dev = vport->dev;
	skb_clear_tstamp(skb);
	vport->ops->send(skb);
	return;

drop:
	kfree_skb(skb);
}