
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * The HSR spec says never to forward the same frame twice on the same
 * interface. A frame is identified by its source MAC address and its HSR
 * sequence number. This code keeps track of senders and their sequence
 * numbers to allow filtering of duplicate frames, and to detect HSR ring
 * errors. The same code handles filtering of duplicates for PRP as well.
 */

#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "hsr_main.h"
#include "hsr_framereg.h"
#include "hsr_netlink.h"

/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
 * false otherwise.
 */
static bool seq_nr_after(u16 a, u16 b)
{
	/* Remove inconsistency where
	 * seq_nr_after(a, b) == seq_nr_before(a, b)
	 */
	if ((int)b - a == 32768)
		return false;
	return (((s16)(b - a)) < 0);
}

#define seq_nr_before(a, b)		seq_nr_after((b), (a))
#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))

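/* A quick worked example of the wrap-around comparison above (illustrative
 * values, not part of the protocol logic): with 16-bit sequence numbers,
 *
 *	seq_nr_after(2, 0xfffe) == true    - 2 is 4 steps ahead of 0xfffe
 *	seq_nr_after(0xfffe, 2) == false
 *	seq_nr_after(0, 0x8000) == false   - exactly half the space apart;
 *	seq_nr_after(0x8000, 0) == true      without the 32768 special case,
 *	                                     both calls would return true
 */
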
bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr)
{
	if (!hsr->redbox || !is_valid_ether_addr(hsr->macaddress_redbox))
		return false;

	return ether_addr_equal(addr, hsr->macaddress_redbox);
}

bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
	struct hsr_self_node *sn;
	bool ret = false;

	rcu_read_lock();
	sn = rcu_dereference(hsr->self_node);
	if (!sn) {
		WARN_ONCE(1, "HSR: No self node\n");
		goto out;
	}

	if (ether_addr_equal(addr, sn->macaddress_A) ||
	    ether_addr_equal(addr, sn->macaddress_B))
		ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/* Search for mac entry. Caller must hold rcu read lock.
 */
static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
					    const unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, addr))
			return node;
	}

	return NULL;
}

/* Check if a node for a given MAC address is already present in the database.
 */
bool hsr_is_node_in_db(struct list_head *node_db,
		       const unsigned char addr[ETH_ALEN])
{
	return !!find_node_by_addr_A(node_db, addr);
}

/* Helper for device init; the self_node is used in hsr_rcv() to recognize
 * frames from self that have been looped over the HSR ring.
 */
int hsr_create_self_node(struct hsr_priv *hsr,
			 const unsigned char addr_a[ETH_ALEN],
			 const unsigned char addr_b[ETH_ALEN])
{
	struct hsr_self_node *sn, *old;

	sn = kmalloc(sizeof(*sn), GFP_KERNEL);
	if (!sn)
		return -ENOMEM;

	ether_addr_copy(sn->macaddress_A, addr_a);
	ether_addr_copy(sn->macaddress_B, addr_b);

	spin_lock_bh(&hsr->list_lock);
	old = rcu_replace_pointer(hsr->self_node, sn,
				  lockdep_is_held(&hsr->list_lock));
	spin_unlock_bh(&hsr->list_lock);

	if (old)
		kfree_rcu(old, rcu_head);
	return 0;
}

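/* A minimal sketch of how the pointer published above is meant to be read
 * (this mirrors hsr_addr_is_self() in this file; it is illustrative, not an
 * additional API): updates swap in a fully initialized node under list_lock
 * and defer freeing with kfree_rcu(), so lockless readers can safely do:
 *
 *	rcu_read_lock();
 *	sn = rcu_dereference(hsr->self_node);
 *	if (sn)
 *		... compare against sn->macaddress_A / sn->macaddress_B ...
 *	rcu_read_unlock();
 */
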
void hsr_del_self_node(struct hsr_priv *hsr)
{
	struct hsr_self_node *old;

	spin_lock_bh(&hsr->list_lock);
	old = rcu_replace_pointer(hsr->self_node, NULL,
				  lockdep_is_held(&hsr->list_lock));
	spin_unlock_bh(&hsr->list_lock);

	if (old)
		kfree_rcu(old, rcu_head);
}

void hsr_del_nodes(struct list_head *node_db)
{
	struct hsr_node *node;
	struct hsr_node *tmp;

	list_for_each_entry_safe(node, tmp, node_db, mac_list)
		kfree(node);
}

void prp_handle_san_frame(bool san, enum hsr_port_type port,
			  struct hsr_node *node)
{
	/* Mark if the SAN node is over LAN_A or LAN_B */
	if (port == HSR_PT_SLAVE_A) {
		node->san_a = true;
		return;
	}

	if (port == HSR_PT_SLAVE_B)
		node->san_b = true;
}

/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
 * seq_out is used to initialize filtering of outgoing duplicate frames
 * originating from the newly added node.
 */
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
				     struct list_head *node_db,
				     unsigned char addr[],
				     u16 seq_out, bool san,
				     enum hsr_port_type rx_port)
{
	struct hsr_node *new_node, *node;
	unsigned long now;
	int i;

	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
	if (!new_node)
		return NULL;

	ether_addr_copy(new_node->macaddress_A, addr);
	spin_lock_init(&new_node->seq_out_lock);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger a spurious ring error warning.)
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++) {
		new_node->time_in[i] = now;
		new_node->time_out[i] = now;
	}
	for (i = 0; i < HSR_PT_PORTS; i++)
		new_node->seq_out[i] = seq_out;

	if (san && hsr->proto_ops->handle_san_frame)
		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_rcu(node, node_db, mac_list,
				lockdep_is_held(&hsr->list_lock)) {
		if (ether_addr_equal(node->macaddress_A, addr))
			goto out;
		if (ether_addr_equal(node->macaddress_B, addr))
			goto out;
	}
	list_add_tail_rcu(&new_node->mac_list, node_db);
	spin_unlock_bh(&hsr->list_lock);
	return new_node;
out:
	spin_unlock_bh(&hsr->list_lock);
	kfree(new_node);
	return node;
}

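/* Why hsr_add_node() re-scans the list under list_lock even though the
 * caller already looked the address up: two contexts can race to add the
 * same sender after both missed it in the lockless lookup. An illustrative
 * interleaving (not code):
 *
 *	CPU0: lockless lookup -> miss     CPU1: lockless lookup -> miss
 *	CPU0: takes list_lock, scans ->
 *	      miss, inserts, unlocks
 *	                                  CPU1: takes list_lock, scans ->
 *	                                        hit, frees its candidate and
 *	                                        returns CPU0's node instead
 */
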
void prp_update_san_info(struct hsr_node *node, bool is_sup)
{
	if (!is_sup)
		return;

	node->san_a = false;
	node->san_b = false;
}

/* Get the hsr_node from which 'skb' was sent.
 */
struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
			      struct sk_buff *skb, bool is_sup,
			      enum hsr_port_type rx_port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	struct prp_rct *rct;
	bool san = false;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
	}

	/* If not found above, check the proxy node table as well */
	list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
	}

	/* Any node connected to the HSR/PRP device may create a node entry.
	 */
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return NULL;

		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
	} else {
		rct = skb_get_PRP_rct(skb);
		if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
			seq_out = prp_get_skb_sequence_nr(rct);
		} else {
			if (rx_port != HSR_PT_MASTER)
				san = true;
			seq_out = HSR_SEQNR_START;
		}
	}

	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
			    san, rx_port);
}

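/* Why the "- 1" when seeding seq_out above (illustrative walk-through):
 * hsr_register_frame_out() drops a frame whose sequence_nr is before or
 * equal to the stored seq_out for that port. Seeding with (sequence_nr - 1)
 * makes the very frame that created the entry count as new:
 *
 *	frame with seq 100 arrives, node is created with seq_out = 99
 *	hsr_register_frame_out(master, node, 100): 100 is after 99 ->
 *		forwarded, seq_out[master] becomes 100
 *	ring duplicate with seq 100 via the other slave: 100 <= 100 ->
 *		dropped as a duplicate
 */
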
/* Use the supervision frame's info about a possible macaddress_B to merge
 * nodes that have previously had their macaddress_B registered as a separate
 * node.
 */
void hsr_handle_sup_frame(struct hsr_frame_info *frame)
{
	struct hsr_node *node_curr = frame->node_src;
	struct hsr_port *port_rcv = frame->port_rcv;
	struct hsr_priv *hsr = port_rcv->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tlv *hsr_sup_tlv;
	struct hsr_node *node_real;
	struct sk_buff *skb = NULL;
	struct list_head *node_db;
	struct ethhdr *ethhdr;
	int i;
	unsigned int pull_size = 0;
	unsigned int total_pull_size = 0;

	/* Here either frame->skb_hsr or frame->skb_prp should be
	 * valid, as a supervision frame will always carry protocol
	 * header info.
	 */
	if (frame->skb_hsr)
		skb = frame->skb_hsr;
	else if (frame->skb_prp)
		skb = frame->skb_prp;
	else if (frame->skb_std)
		skb = frame->skb_std;
	if (!skb)
		return;

	/* Leave the ethernet header. */
	pull_size = sizeof(struct ethhdr);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* And leave the HSR tag. */
	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
		pull_size = sizeof(struct hsr_tag);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;
	}

	/* And leave the HSR sup tag. */
	pull_size = sizeof(struct hsr_sup_tag);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* Get the HSR sup payload. */
	hsr_sp = (struct hsr_sup_payload *)skb->data;

	/* Merge node_curr (registered on macaddress_B) into node_real */
	node_db = &port_rcv->hsr->node_db;
	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
	if (!node_real)
		/* No frame received from AddrA of this node yet */
		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
					 HSR_SEQNR_START - 1, true,
					 port_rcv->type);
	if (!node_real)
		goto done; /* No mem */
	if (node_real == node_curr)
		/* Node has already been merged */
		goto done;

	/* Leave the first HSR sup payload. */
	pull_size = sizeof(struct hsr_sup_payload);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* Get the second supervision TLV */
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	/* And check if it is a RedBox MAC TLV */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* We could stop here after pushing the hsr_sup_payload back,
		 * or proceed and also handle macaddress_B for RedBoxes.
		 */
		/* Sanity check length */
		if (hsr_sup_tlv->HSR_TLV_length != 6)
			goto done;

		/* Leave the second HSR sup TLV. */
		pull_size = sizeof(struct hsr_sup_tlv);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;

		/* Get the RedBox MAC address. */
		hsr_sp = (struct hsr_sup_payload *)skb->data;

		/* Check if the RedBox MAC and node MAC are equal. */
		if (!ether_addr_equal(node_real->macaddress_A,
				      hsr_sp->macaddress_A)) {
			/* This is a RedBox supervision frame for a VDAN! */
			goto done;
		}
	}

	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
	spin_lock_bh(&node_real->seq_out_lock);
	for (i = 0; i < HSR_PT_PORTS; i++) {
		if (!node_curr->time_in_stale[i] &&
		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
			node_real->time_in[i] = node_curr->time_in[i];
			node_real->time_in_stale[i] =
						node_curr->time_in_stale[i];
		}
		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
			node_real->seq_out[i] = node_curr->seq_out[i];
	}
	spin_unlock_bh(&node_real->seq_out_lock);
	node_real->addr_B_port = port_rcv->type;

	spin_lock_bh(&hsr->list_lock);
	if (!node_curr->removed) {
		list_del_rcu(&node_curr->mac_list);
		node_curr->removed = true;
		kfree_rcu(node_curr, rcu_head);
	}
	spin_unlock_bh(&hsr->list_lock);

done:
	/* Push back here */
	skb_push(skb, total_pull_size);
}

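/* The supervision frame layout walked by hsr_handle_sup_frame() above
 * (illustrative; exact field sizes come from the struct definitions, and
 * the hsr_tag is only present when h_proto == ETH_P_HSR):
 *
 *	ethhdr | [hsr_tag] | hsr_sup_tag | hsr_sup_payload (macaddress_A)
 *	       | [hsr_sup_tlv (PRP_TLV_REDBOX_MAC) | RedBox MAC payload]
 *
 * Each skb_pull() is accumulated in total_pull_size so that the single
 * skb_push() at 'done:' restores skb->data to where it started, whichever
 * path was taken through the function.
 */
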
/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
 *
 * If the frame was sent by a node's B interface, replace the source
 * address with that node's "official" address (macaddress_A) so that upper
 * layers recognize where it came from.
 */
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
{
	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
}

/* 'skb' is a frame meant for another host.
 * 'port' is the outgoing interface.
 *
 * Substitute the target (dest) MAC address if necessary, so that it matches
 * the recipient interface MAC address, regardless of whether that is the
 * recipient's A or B interface.
 * This is needed to keep the packets flowing through switches that learn on
 * which "side" the different interfaces are.
 */
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
			 struct hsr_port *port)
{
	struct hsr_node *node_dst;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
		return;

	node_dst = find_node_by_addr_A(&port->hsr->node_db,
				       eth_hdr(skb)->h_dest);
	if (!node_dst && port->hsr->redbox)
		node_dst = find_node_by_addr_A(&port->hsr->proxy_node_db,
					       eth_hdr(skb)->h_dest);
	if (!node_dst) {
		if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
		return;
	}
	if (port->type != node_dst->addr_B_port)
		return;

	if (is_valid_ether_addr(node_dst->macaddress_B))
		ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}

void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
			   u16 sequence_nr)
{
	/* Don't register incoming frames without a valid sequence number. This
	 * ensures entries of restarted nodes get pruned so that they can
	 * re-register and resume communication.
	 */
	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
		return;

	node->time_in[port->type] = jiffies;
	node->time_in_stale[port->type] = false;
}

/* 'skb' is an HSR Ethernet frame (with an HSR tag inserted), with a valid
 * ethhdr->h_source address and skb->mac_header set.
 *
 * Return:
 *	 1 if frame can be shown to have been sent recently on this interface,
 *	 0 otherwise, or
 *	 negative error code on error
 */
int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
			   u16 sequence_nr)
{
	spin_lock_bh(&node->seq_out_lock);
	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
	    time_is_after_jiffies(node->time_out[port->type] +
	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
		spin_unlock_bh(&node->seq_out_lock);
		return 1;
	}

	node->time_out[port->type] = jiffies;
	node->seq_out[port->type] = sequence_nr;
	spin_unlock_bh(&node->seq_out_lock);
	return 0;
}

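/* Duplicate filtering in action (an illustrative timeline for one remote
 * node and the master port, assuming both ring copies arrive within
 * HSR_ENTRY_FORGET_TIME of each other):
 *
 *	copy 1, seq 100, via slave A: 100 is after the stored seq_out ->
 *		return 0 (deliver), seq_out = 100, time_out = jiffies
 *	copy 2, seq 100, via slave B: 100 <= 100 and the entry is recent ->
 *		return 1 (drop as duplicate)
 *
 * The HSR_ENTRY_FORGET_TIME window lets a node that restarted (and thus
 * reset its sequence numbers) resume communication instead of having all
 * its frames treated as old duplicates.
 */
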
static struct hsr_port *get_late_port(struct hsr_priv *hsr,
				      struct hsr_node *node)
{
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	if (time_after(node->time_in[HSR_PT_SLAVE_B],
		       node->time_in[HSR_PT_SLAVE_A] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (time_after(node->time_in[HSR_PT_SLAVE_A],
		       node->time_in[HSR_PT_SLAVE_B] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	return NULL;
}

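/* An illustrative reading of the "late port" heuristic above: if frames
 * from a node were last seen on slave A at time t but kept arriving on
 * slave B past t + MAX_SLAVE_DIFF, slave A is reported as the late port -
 * one direction of the ring is probably broken between us and that node.
 * A stale time_in marks that slave as late immediately.
 */
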
/* Remove stale sequence_nr records. Called by timer every
 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
 */
void hsr_prune_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
	struct hsr_node *node;
	struct hsr_node *tmp;
	struct hsr_port *port;
	unsigned long timestamp;
	unsigned long time_a, time_b;

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
		 * nor time_in[HSR_PT_SLAVE_B] will ever be updated for
		 * the master port. Thus the master node would be repeatedly
		 * pruned, leading to packet loss.
		 */
		if (hsr_addr_is_self(hsr, node->macaddress_A))
			continue;

		/* Shorthand */
		time_a = node->time_in[HSR_PT_SLAVE_A];
		time_b = node->time_in[HSR_PT_SLAVE_B];

		/* Check for timestamps old enough to risk wrap-around */
		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_A] = true;
		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_B] = true;

		/* Get age of newest frame from node.
		 * At least one time_in is OK here; nodes get pruned long
		 * before both time_ins can get stale.
		 */
		timestamp = time_a;
		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
		     time_after(time_b, time_a)))
			timestamp = time_b;

		/* Warn of ring error only as long as we get frames at all */
		if (time_is_after_jiffies(timestamp +
				msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
			rcu_read_lock();
			port = get_late_port(hsr, node);
			if (port)
				hsr_nl_ringerror(hsr, node->macaddress_A,
						 port);
			rcu_read_unlock();
		}

		/* Prune old entries */
		if (time_is_before_jiffies(timestamp +
				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
			hsr_nl_nodedown(hsr, node->macaddress_A);
			if (!node->removed) {
				list_del_rcu(&node->mac_list);
				node->removed = true;
				/* Note that we need to free this entry
				 * later:
				 */
				kfree_rcu(node, rcu_head);
			}
		}
	}
	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}

void hsr_prune_proxy_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = from_timer(hsr, t, prune_proxy_timer);
	unsigned long timestamp;
	struct hsr_node *node;
	struct hsr_node *tmp;

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_safe(node, tmp, &hsr->proxy_node_db, mac_list) {
		/* Don't prune the RedBox node. */
		if (hsr_addr_is_redbox(hsr, node->macaddress_A))
			continue;

		timestamp = node->time_in[HSR_PT_INTERLINK];

		/* Prune old entries */
		if (time_is_before_jiffies(timestamp +
				msecs_to_jiffies(HSR_PROXY_NODE_FORGET_TIME))) {
			hsr_nl_nodedown(hsr, node->macaddress_A);
			if (!node->removed) {
				list_del_rcu(&node->mac_list);
				node->removed = true;
				/* Note that we need to free this entry
				 * later:
				 */
				kfree_rcu(node, rcu_head);
			}
		}
	}
	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_proxy_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PROXY_PERIOD));
}

void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
			unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	if (!_pos) {
		node = list_first_or_null_rcu(&hsr->node_db,
					      struct hsr_node, mac_list);
		if (node)
			ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	node = _pos;
	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
		ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	return NULL;
}

int hsr_get_node_data(struct hsr_priv *hsr,
		      const unsigned char *addr,
		      unsigned char addr_b[ETH_ALEN],
		      unsigned int *addr_b_ifindex,
		      int *if1_age,
		      u16 *if1_seq,
		      int *if2_age,
		      u16 *if2_seq)
{
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long tdiff;

	node = find_node_by_addr_A(&hsr->node_db, addr);
	if (!node)
		return -ENOENT;

	ether_addr_copy(addr_b, node->macaddress_B);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if1_age = INT_MAX;
#endif
	else
		*if1_age = jiffies_to_msecs(tdiff);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if2_age = INT_MAX;
#endif
	else
		*if2_age = jiffies_to_msecs(tdiff);

	/* Present sequence numbers as if they were incoming on interface */
	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];

	if (node->addr_B_port != HSR_PT_NONE) {
		port = hsr_port_get_hsr(hsr, node->addr_B_port);
		*addr_b_ifindex = port->dev->ifindex;
	} else {
		*addr_b_ifindex = -1;
	}

	return 0;
}