hsr_slave.c 5.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright 2011-2014 Autronica Fire and Security AS
  3. *
  4. * Author(s):
  5. * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
  6. *
 * Frame handler and other utility functions for HSR and PRP.
  8. */
  9. #include "hsr_slave.h"
  10. #include <linux/etherdevice.h>
  11. #include <linux/if_arp.h>
  12. #include <linux/if_vlan.h>
  13. #include "hsr_main.h"
  14. #include "hsr_device.h"
  15. #include "hsr_forward.h"
  16. #include "hsr_framereg.h"
  17. bool hsr_invalid_dan_ingress_frame(__be16 protocol)
  18. {
  19. return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
  20. }
/* rx_handler installed on every HSR/PRP slave and interlink port (see
 * netdev_rx_handler_register() in hsr_portdev_setup()). Claims frames
 * belonging to the HSR/PRP instance and hands them to hsr_forward_skb();
 * everything else is returned to the normal receive path via
 * RX_HANDLER_PASS.
 */
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	struct hsr_priv *hsr;
	__be16 protocol;

	/* Packets from dev_loopback_xmit() do not have L2 header, bail out */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);
		return RX_HANDLER_PASS;
	}

	/* Map the receiving netdev back to its hsr_port; no port means the
	 * device is not (or no longer) enslaved, so leave the frame alone.
	 */
	port = hsr_port_get_rcu(skb->dev);
	if (!port)
		goto finish_pass;
	hsr = port->hsr;

	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
		/* Directly kill frames sent by ourselves */
		kfree_skb(skb);
		goto finish_consume;
	}

	/* For HSR, only tagged frames are expected (unless the device offloads
	 * HSR tag removal), but for PRP there could be non tagged frames as
	 * well from Single attached nodes (SANs).
	 */
	protocol = eth_hdr(skb)->h_proto;
	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    port->type != HSR_PT_INTERLINK &&
	    hsr->proto_ops->invalid_dan_ingress_frame &&
	    hsr->proto_ops->invalid_dan_ingress_frame(protocol))
		goto finish_pass;

	/* Re-expose the Ethernet header so forwarding sees the full L2
	 * frame, then re-anchor the mac header at the new head.
	 */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);

	/* For tagged frames, make sure the HSR/PRP tag is in the linear
	 * part of the skb before anyone dereferences it, and place the
	 * network header just past Ethernet + HSR tag.
	 */
	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
	    protocol == htons(ETH_P_HSR)) {
		if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) {
			kfree_skb(skb);
			goto finish_consume;
		}
		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
	}
	skb_reset_mac_len(skb);

	/* Only the frames received over the interlink port will assign a
	 * sequence number and require synchronisation vs other sender.
	 */
	if (port->type == HSR_PT_INTERLINK) {
		spin_lock_bh(&hsr->seqnr_lock);
		hsr_forward_skb(skb, port);
		spin_unlock_bh(&hsr->seqnr_lock);
	} else {
		hsr_forward_skb(skb, port);
	}

finish_consume:
	return RX_HANDLER_CONSUMED;

finish_pass:
	return RX_HANDLER_PASS;
}
  79. bool hsr_port_exists(const struct net_device *dev)
  80. {
  81. return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
  82. }
  83. static int hsr_check_dev_ok(struct net_device *dev,
  84. struct netlink_ext_ack *extack)
  85. {
  86. /* Don't allow HSR on non-ethernet like devices */
  87. if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
  88. dev->addr_len != ETH_ALEN) {
  89. NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
  90. return -EINVAL;
  91. }
  92. /* Don't allow enslaving hsr devices */
  93. if (is_hsr_master(dev)) {
  94. NL_SET_ERR_MSG_MOD(extack,
  95. "Cannot create trees of HSR devices.");
  96. return -EINVAL;
  97. }
  98. if (hsr_port_exists(dev)) {
  99. NL_SET_ERR_MSG_MOD(extack,
  100. "This device is already a HSR slave.");
  101. return -EINVAL;
  102. }
  103. if (is_vlan_dev(dev)) {
  104. NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
  105. return -EINVAL;
  106. }
  107. if (dev->priv_flags & IFF_DONT_BRIDGE) {
  108. NL_SET_ERR_MSG_MOD(extack,
  109. "This device does not support bridging.");
  110. return -EOPNOTSUPP;
  111. }
  112. /* HSR over bonded devices has not been tested, but I'm not sure it
  113. * won't work...
  114. */
  115. return 0;
  116. }
/* Setup device to be added to the HSR bridge. Acquires promiscuous mode
 * (unless forwarding is offloaded), links @dev under the HSR master
 * device, and diverts its ingress traffic to hsr_handle_frame() with
 * @port as rx_handler_data. On failure every step taken so far is
 * unwound in reverse order. Returns 0 or a negative errno.
 */
static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
			     struct hsr_port *port,
			     struct netlink_ext_ack *extack)
{
	struct net_device *hsr_dev;
	struct hsr_port *master;
	int res;

	/* Don't use promiscuous mode for offload since L2 frame forward
	 * happens at the offloaded hardware.
	 */
	if (!port->hsr->fwd_offloaded) {
		res = dev_set_promiscuity(dev, 1);
		if (res)
			return res;
	}

	/* Record the slave as a lower device of the HSR master */
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr_dev = master->dev;

	res = netdev_upper_dev_link(dev, hsr_dev, extack);
	if (res)
		goto fail_upper_dev_link;

	/* From here on, frames received on @dev go to hsr_handle_frame() */
	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
	if (res)
		goto fail_rx_handler;
	dev_disable_lro(dev);

	return 0;

fail_rx_handler:
	netdev_upper_dev_unlink(dev, hsr_dev);
fail_upper_dev_link:
	/* Drop the promiscuity reference taken above, if any */
	if (!port->hsr->fwd_offloaded)
		dev_set_promiscuity(dev, -1);
	return res;
}
/* Add a port of the given @type to the HSR instance @hsr, backed by
 * netdevice @dev. Non-master ports are validated and wired up via
 * hsr_portdev_setup() first. The new port is then published on
 * hsr->ports (RCU list), after which the master's features and MTU are
 * recomputed. Returns 0, or a negative errno (-EBUSY if a port of this
 * type already exists, -ENOMEM on allocation failure, or the error from
 * the validation/setup steps).
 */
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type, struct netlink_ext_ack *extack)
{
	struct hsr_port *port, *master;
	int res;

	/* The master device is our own and skips the slave-device checks */
	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev, extack);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port)
		return -EBUSY;	/* This port already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(hsr, dev, port, extack);
		if (res)
			goto fail_dev_setup;
	}

	/* Publish the fully-initialized port to RCU readers, then wait for
	 * pre-existing readers before touching master features/MTU.
	 */
	list_add_tail_rcu(&port->port_list, &hsr->ports);
	synchronize_rcu();

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}
/* Remove @port from its HSR instance and free it. The port is first
 * unlinked from the RCU ports list; for slave ports the master's
 * features/MTU are recomputed and the rx_handler, promiscuity reference
 * and upper-device link set up in hsr_portdev_setup() are torn down.
 * synchronize_rcu() ensures no RCU reader still holds the port before
 * kfree().
 */
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	/* The master port itself has none of the slave wiring to undo */
	if (port != master) {
		netdev_update_features(master->dev);
		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		netdev_rx_handler_unregister(port->dev);
		if (!port->hsr->fwd_offloaded)
			dev_set_promiscuity(port->dev, -1);
		netdev_upper_dev_unlink(port->dev, master->dev);
	}

	/* Wait out readers that may still reference the unlinked port */
	synchronize_rcu();
	kfree(port);
}
  201. }