// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame router for HSR and PRP.
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;

/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;
	struct hsr_sup_tlv *hsr_sup_tlv;
	u16 total_length = 0;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		total_length = sizeof(struct hsrv1_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		total_length = sizeof(struct hsrv0_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;

	if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
	    hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	/* Get next tlv */
	total_length += hsr_sup_tag->tlv.HSR_TLV_length;
	if (!pskb_may_pull(skb, total_length))
		return false;
	skb_pull(skb, total_length);
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	skb_push(skb, total_length);

	/* if this is a redbox supervision frame we need to verify
	 * that more data is available
	 */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* tlv length must be a length of a mac address */
		if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
			return false;

		/* make sure another tlv follows */
		total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
		if (!pskb_may_pull(skb, total_length))
			return false;

		/* get next tlv */
		skb_pull(skb, total_length);
		hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
		skb_push(skb, total_length);
	}

	/* end of tlvs must follow at the end */
	if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
	    hsr_sup_tlv->HSR_TLV_length != 0)
		return false;

	return true;
}

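/* Check whether the supervision frame carries the MacAddressA of a node that
 * is present in our own ProxyNodeTable, i.e. a node this RedBox proxies for.
 */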
static bool is_proxy_supervision_frame(struct hsr_priv *hsr,
				       struct sk_buff *skb)
{
	struct hsr_sup_payload *payload;
	struct ethhdr *eth_hdr;
	u16 total_length = 0;

	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Get the HSR protocol revision. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR))
		total_length = sizeof(struct hsrv1_ethhdr_sp);
	else
		total_length = sizeof(struct hsrv0_ethhdr_sp);

	if (!pskb_may_pull(skb, total_length + sizeof(struct hsr_sup_payload)))
		return false;

	skb_pull(skb, total_length);
	payload = (struct hsr_sup_payload *)skb->data;
	skb_push(skb, total_length);

	/* For RedBox (HSR-SAN) check if we have received the supervision
	 * frame with MAC addresses from own ProxyNodeTable.
	 */
	return hsr_is_node_in_db(&hsr->proxy_node_db,
				 payload->macaddress_A);
}

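/* Create an untagged copy of an HSR-tagged frame: the HSR header is cut out
 * by moving the addresses (and a VLAN tag, if present) up over it.
 */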
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

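/* Return a clone of the untagged version of the frame, creating the stripped
 * copy on first use and caching it in frame->skb_std.
 */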
struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_hsr)
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		else
			netdev_warn_once(port->dev,
					 "Unexpected frame received in hsr_get_untagged_frame()\n");

		if (!frame->skb_std)
			return NULL;
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

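/* PRP counterpart of hsr_get_untagged_frame(): strip the trailing RCT instead
 * of a leading HSR tag.
 */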
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by len - HSR_HLEN to exclude RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

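/* Encode into the PRP RCT which LAN the frame leaves on (0 for slave A, 1 for
 * slave B), combined with this device's net_id.
 */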
static void prp_set_lan_id(struct prp_rct *trailer,
			   struct hsr_port *port)
{
	int lane_id;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	/* Add net_id in the upper 3 bits of lane_id */
	lane_id |= port->hsr->net_id;

	set_prp_lan_id(trailer, lane_id);
}

/* Tailroom for PRP rct should have been created before calling this */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	lsdu_size = skb->len - 14;	/* LSDU = frame minus the 14-byte Ethernet header */
	if (frame->is_vlan)
		lsdu_size -= 4;		/* ... and minus the 4-byte VLAN tag */
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);
	skb->protocol = eth_hdr(skb)->h_proto;

	return skb;
}

static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
			    struct hsr_port *port)
{
	int path_id;

	if (port->type == HSR_PT_SLAVE_A)
		path_id = 0;
	else
		path_id = 1;

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}

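/* Fill in the HSR tag (path id, LSDU size, sequence number) at the mac header
 * and set the EtherType to ETH_P_HSR (v1) or ETH_P_PRP (v0), moving the
 * original protocol into encap_proto.
 */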
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lsdu_size;

	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	lsdu_size = skb->len - 14;	/* LSDU = frame minus the 14-byte Ethernet header */
	if (frame->is_vlan)
		lsdu_size -= 4;		/* ... and minus the 4-byte VLAN tag */

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);
	skb->protocol = hsr_ethhdr->ethhdr.h_proto;

	return skb;
}

/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto() frees the skb on error, and hsr_fill_tag() returns
	 * NULL in that case.
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}

struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	struct sk_buff *skb;

	if (frame->skb_prp) {
		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

		if (trailer) {
			prp_set_lan_id(trailer, port);
		} else {
			WARN_ONCE(!trailer, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	skb = skb_copy_expand(frame->skb_std, 0,
			      skb_tailroom(frame->skb_std) + HSR_HLEN,
			      GFP_ATOMIC);
	return prp_fill_rct(skb, frame, port);
}

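/* Deliver the frame to the upper layer through the master device and update
 * the master's rx statistics.
 */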
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res, recv_len;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	recv_len = skb->len;
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += recv_len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

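/* Transmit the frame on the given port, substituting the source MAC address
 * where required for master-originated and interlink traffic.
 */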
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}

	/* When HSR node is used as RedBox - the frame received from HSR ring
	 * requires source MAC address (SA) replacement to one which can be
	 * recognized by SAN devices (otherwise, frames are dropped by switch)
	 */
	if (port->type == HSR_PT_INTERLINK)
		ether_addr_copy(eth_hdr(skb)->h_source,
				port->hsr->macaddress_redbox);

	return dev_queue_xmit(skb);
}

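/* PRP keeps its two LANs strictly separate: a frame received on one slave
 * port must never be forwarded out the other.
 */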
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
		 port->type == HSR_PT_SLAVE_B) ||
		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
		 port->type == HSR_PT_SLAVE_A));
}

bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	struct sk_buff *skb;

	if (port->dev->features & NETIF_F_HW_HSR_FWD)
		return prp_drop_frame(frame, port);

	/* RedBox specific frame-dropping policies
	 *
	 * Do not send HSR supervisory frames to SAN devices
	 */
	if (frame->is_supervision && port->type == HSR_PT_INTERLINK)
		return true;

	/* Do not forward to the other HSR port (A or B) unicast frames which
	 * are addressed to the interlink port (and are in the ProxyNodeTable).
	 */
	skb = frame->skb_hsr;
	if (skb && prp_drop_frame(frame, port) &&
	    is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
	    hsr_is_node_in_db(&port->hsr->proxy_node_db,
			      eth_hdr(skb)->h_dest)) {
		return true;
	}

	/* Do not forward to port C (Interlink) frames from nodes A and B
	 * if DA is in NodeTable.
	 */
	if ((frame->port_rcv->type == HSR_PT_SLAVE_A ||
	     frame->port_rcv->type == HSR_PT_SLAVE_B) &&
	    port->type == HSR_PT_INTERLINK) {
		skb = frame->skb_hsr;
		if (skb && is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
		    hsr_is_node_in_db(&port->hsr->node_db,
				      eth_hdr(skb)->h_dest)) {
			return true;
		}
	}

	/* Do not forward to ports A and B unicast frames received on the
	 * interlink port if they are addressed to one of the nodes registered
	 * in the ProxyNodeTable.
	 */
	if ((port->type == HSR_PT_SLAVE_A || port->type == HSR_PT_SLAVE_B) &&
	    frame->port_rcv->type == HSR_PT_INTERLINK) {
		skb = frame->skb_std;
		if (skb && is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
		    hsr_is_node_in_db(&port->hsr->proxy_node_db,
				      eth_hdr(skb)->h_dest)) {
			return true;
		}
	}

	return false;
}

/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - If it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert an HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;
	bool sent = false;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* If hardware duplicate generation is enabled, only send out
		 * one port.
		 */
		if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also for SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER &&
		    !frame->is_proxy_supervision) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped. Eg. for PRP no forward
		 * between ports, or sending HSR supervision to RedBox.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		if (port->type == HSR_PT_SLAVE_A ||
		    port->type == HSR_PT_SLAVE_B)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER) {
			hsr_deliver_master(skb, port->dev, frame->node_src);
		} else {
			if (!hsr_xmit(skb, port, frame))
				if (port->type == HSR_PT_SLAVE_A ||
				    port->type == HSR_PT_SLAVE_B)
					sent = true;
		}
	}
}

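/* Classify the destination: is the frame addressed exclusively to this node
 * (unicast to one of our addresses), and should it be delivered locally at
 * all (host, multicast or broadcast)?
 */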
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

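/* Record an untagged frame in the frame info and, for frames entering via the
 * master or interlink port, assign the next sequence number.
 */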
static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER)
		frame->is_from_san = true;

	if (port->type == HSR_PT_MASTER ||
	    port->type == HSR_PT_INTERLINK) {
		/* Sequence nr for the master/interlink node */
		lockdep_assert_held(&hsr->seqnr_lock);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
	}
}

int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;

	/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
	    proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return -EINVAL;

		/* HSR tagged frame :- Data or Supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return 0;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);

	return 0;
}

int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	/* Supervision frame */
	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return 0;
	}

	handle_std_frame(skb, frame);
	return 0;
}

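/* Parse the received skb and fill in 'frame': supervision status, source
 * node, VLAN handling and the protocol-specific fields. Returns a negative
 * value for malformed frames or unknown senders, which the caller drops.
 */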
static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct list_head *n_db;
	struct ethhdr *ethhdr;
	__be16 proto;
	int ret;

	/* Check if skb contains ethhdr */
	if (skb->mac_len < sizeof(struct ethhdr))
		return -EINVAL;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	if (frame->is_supervision && hsr->redbox)
		frame->is_proxy_supervision =
			is_proxy_supervision_frame(port->hsr, skb);

	n_db = &hsr->node_db;
	if (port->type == HSR_PT_INTERLINK)
		n_db = &hsr->proxy_node_db;

	frame->node_src = hsr_get_node(port, n_db, skb,
				       frame->is_supervision, port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		/* Note: skb->mac_len might be wrong here. */
		if (!pskb_may_pull(skb,
				   skb_mac_offset(skb) +
				   offsetofend(struct hsr_vlan_ethhdr, vlanhdr)))
			return -EINVAL;

		vlan_hdr = (struct hsr_vlan_ethhdr *)skb_mac_header(skb);
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
		return -EINVAL;
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
	if (ret)
		return ret;

	check_local_dest(port->hsr, skb, frame);

	return 0;
}

/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	rcu_read_lock();
	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	rcu_read_unlock();
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER || port->type == HSR_PT_INTERLINK) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	rcu_read_unlock();
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}