mrp.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * IEEE 802.1Q Multiple Registration Protocol (MRP)
  4. *
  5. * Copyright (c) 2012 Massachusetts Institute of Technology
  6. *
  7. * Adapted from code in net/802/garp.c
  8. * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
  9. */
  10. #include <linux/kernel.h>
  11. #include <linux/timer.h>
  12. #include <linux/skbuff.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/rtnetlink.h>
  16. #include <linux/slab.h>
  17. #include <linux/module.h>
  18. #include <net/mrp.h>
  19. #include <linux/unaligned.h>
  20. static unsigned int mrp_join_time __read_mostly = 200;
  21. module_param(mrp_join_time, uint, 0644);
  22. MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
  23. static unsigned int mrp_periodic_time __read_mostly = 1000;
  24. module_param(mrp_periodic_time, uint, 0644);
  25. MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
  26. MODULE_DESCRIPTION("IEEE 802.1Q Multiple Registration Protocol (MRP)");
  27. MODULE_LICENSE("GPL");
/* MRP applicant state machine transition table, following the Applicant
 * state table of IEEE 802.1Q (clause 10, MRP).  Indexed by
 * [current applicant state][event]; the stored value is the next state.
 * Entries not listed default to 0 == MRP_APPLICANT_INVALID, which
 * mrp_attr_event() treats as a bug (WARN_ON) — every reachable
 * state/event pair must be filled in here.
 *
 * State mnemonics per the standard: first letter V/A/Q = Very anxious /
 * Anxious / Quiet, second letter N/P/O = New / Passive / Observer;
 * LA = Leaving Active.
 */
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
};
/* Transmit action per applicant state: what, if anything, to send for
 * an attribute when the join timer grants it a TX opportunity.  The
 * *_OPTIONAL actions are treated as "send nothing" by this pure
 * applicant implementation — see the switch in mrp_attr_event().
 */
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
  210. static void mrp_attrvalue_inc(void *value, u8 len)
  211. {
  212. u8 *v = (u8 *)value;
  213. /* Add 1 to the last byte. If it becomes zero,
  214. * go to the previous byte and repeat.
  215. */
  216. while (len > 0 && !++v[--len])
  217. ;
  218. }
  219. static int mrp_attr_cmp(const struct mrp_attr *attr,
  220. const void *value, u8 len, u8 type)
  221. {
  222. if (attr->type != type)
  223. return attr->type - type;
  224. if (attr->len != len)
  225. return attr->len - len;
  226. return memcmp(attr->value, value, len);
  227. }
  228. static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
  229. const void *value, u8 len, u8 type)
  230. {
  231. struct rb_node *parent = app->mad.rb_node;
  232. struct mrp_attr *attr;
  233. int d;
  234. while (parent) {
  235. attr = rb_entry(parent, struct mrp_attr, node);
  236. d = mrp_attr_cmp(attr, value, len, type);
  237. if (d > 0)
  238. parent = parent->rb_left;
  239. else if (d < 0)
  240. parent = parent->rb_right;
  241. else
  242. return attr;
  243. }
  244. return NULL;
  245. }
/* Look up the attribute keyed by (type, len, value) in the MAD rbtree,
 * inserting a new one if none exists.  A freshly created attribute
 * starts in state MRP_APPLICANT_VO (observing, not declared).  Returns
 * NULL on allocation failure.  Called under app->lock (see
 * mrp_request_join()), hence GFP_ATOMIC.
 */
static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	/* The value is stored in a variable-length array following the
	 * fixed part of struct mrp_attr.
	 */
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type  = type;
	attr->len   = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}
/* Unlink @attr from the applicant's MAD tree and free it. */
static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}
  281. static void mrp_attr_destroy_all(struct mrp_applicant *app)
  282. {
  283. struct rb_node *node, *next;
  284. struct mrp_attr *attr;
  285. for (node = rb_first(&app->mad);
  286. next = node ? rb_next(node) : NULL, node != NULL;
  287. node = next) {
  288. attr = rb_entry(node, struct mrp_attr, node);
  289. mrp_attr_destroy(app, attr);
  290. }
  291. }
/* Start a fresh, empty MRPDU for this applicant: an MTU-sized skb
 * containing only the protocol version header.  Messages are appended
 * by mrp_pdu_append_*(); the skb is held in app->pdu until
 * mrp_pdu_queue() finalizes it.  Returns -ENOMEM on allocation failure.
 */
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	/* Leave headroom for the link-layer header that dev_hard_header()
	 * prepends when the PDU is queued.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}
  310. static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
  311. {
  312. __be16 *endmark;
  313. if (skb_tailroom(app->pdu) < sizeof(*endmark))
  314. return -1;
  315. endmark = __skb_put(app->pdu, sizeof(*endmark));
  316. put_unaligned(MRP_END_MARK, endmark);
  317. return 0;
  318. }
/* Finalize the PDU under construction (if any) and move it to the
 * applicant's transmit queue: close the still-open message with an
 * EndMark, terminate the PDU with a second EndMark, and prepend the
 * link-layer header addressed to the application's group MAC address.
 */
static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	/* First EndMark closes the open message, second one ends
	 * the PDU.
	 */
	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);
	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}
  332. static void mrp_queue_xmit(struct mrp_applicant *app)
  333. {
  334. struct sk_buff *skb;
  335. while ((skb = skb_dequeue(&app->queue)))
  336. dev_queue_xmit(skb);
  337. }
/* Begin a new Message in the PDU for attributes of the given type and
 * length.  If a previous message is still open it is closed with an
 * EndMark first and its cached header pointers are dropped.  Returns
 * -1 when the PDU has no room left.
 */
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;

	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	/* Remember the open message header in the skb control block so
	 * later appends can tell whether it matches their attribute.
	 */
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}
/* Begin a new VectorAttribute in the current message, seeded with the
 * value of its first attribute.  A copy of that value is kept in the
 * skb control block as the "next expected value", so that attributes
 * with consecutive values can share one vector.  Returns -1 when the
 * PDU has no room left.
 */
static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;

	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	/* NumberOfValues starts at zero and is bumped once per packed
	 * event in mrp_pdu_append_vecattr_event().
	 */
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}
/* Pack one event for @attr into the PDU under construction, opening a
 * new PDU, Message or VectorAttribute as needed.  Whenever the current
 * PDU runs out of room it is queued for transmission and the append is
 * retried on a fresh PDU (the goto-again loop).  Returns a negative
 * errno only when allocating a new PDU fails.
 */
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	/* NumberOfValues so far tells us which slot of which vector
	 * octet this event lands in.
	 */
	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	/* Base-6 packing: first event scaled by 36, second by 6, third
	 * added as-is.
	 */
	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}
/* Apply @event to @attr's applicant state machine.  For TX events the
 * corresponding message (if any) is first packed into the PDU; if that
 * fails the state is deliberately left unchanged so the attribute is
 * retried at the next TX opportunity.  Sending a leave also destroys
 * the attribute.  Runs under app->lock.
 */
static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		/* Hitting a hole in the transition table is a bug. */
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */
		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}
/* Declare (join) an attribute on behalf of the application: create it
 * in the applicant's MAD database if needed and feed a JOIN event to
 * its state machine.  The actual transmission happens later from the
 * join timer.  Returns -ENOMEM if the value is too large for the
 * scratch copy kept in the skb control block, or if allocation fails.
 * Caller holds RTNL (rtnl_dereference()).
 */
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	/* The value must fit in mrp_skb_cb's attrvalue scratch area. */
	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);
/* Withdraw (leave) a previously joined attribute: feed an LV event to
 * its state machine.  Silently does nothing if the value is oversized
 * or the attribute is not being tracked.  Caller holds RTNL
 * (rtnl_dereference()).
 */
void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	/* Same size limit as mrp_request_join(). */
	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);
  524. static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
  525. {
  526. struct rb_node *node, *next;
  527. struct mrp_attr *attr;
  528. for (node = rb_first(&app->mad);
  529. next = node ? rb_next(node) : NULL, node != NULL;
  530. node = next) {
  531. attr = rb_entry(node, struct mrp_attr, node);
  532. mrp_attr_event(app, attr, event);
  533. }
  534. }
  535. static void mrp_join_timer_arm(struct mrp_applicant *app)
  536. {
  537. unsigned long delay;
  538. delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
  539. mod_timer(&app->join_timer, jiffies + delay);
  540. }
/* Join timer: give every attribute a TX opportunity, queue the
 * resulting PDU(s), and transmit them.  The queue is drained with the
 * lock dropped; the timer only rearms while the applicant is still
 * active, which lets mrp_uninit_applicant() shut it down cleanly.
 */
static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	/* Transmit outside the lock. */
	mrp_queue_xmit(app);

	spin_lock(&app->lock);
	if (likely(app->active))
		mrp_join_timer_arm(app);
	spin_unlock(&app->lock);
}
  554. static void mrp_periodic_timer_arm(struct mrp_applicant *app)
  555. {
  556. mod_timer(&app->periodic_timer,
  557. jiffies + msecs_to_jiffies(mrp_periodic_time));
  558. }
/* Periodic timer: deliver a PERIODIC event to every attribute and
 * queue any resulting PDU.  Everything — including the rearm — is
 * skipped once the applicant has been deactivated by
 * mrp_uninit_applicant().  Transmission itself is left to the join
 * timer path.
 */
static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	if (likely(app->active)) {
		mrp_mad_event(app, MRP_EVENT_PERIODIC);
		mrp_pdu_queue(app);
		mrp_periodic_timer_arm(app);
	}
	spin_unlock(&app->lock);
}
/* Check for an EndMark at *offset.  Returns -1 both when the skb
 * cannot be read at that offset and when an EndMark is present (in
 * which case *offset is advanced past it) — callers treat -1 as
 * "stop parsing here".  Returns 0 when ordinary data follows.
 */
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}
  581. static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
  582. struct sk_buff *skb,
  583. enum mrp_vecattr_event vaevent)
  584. {
  585. struct mrp_attr *attr;
  586. enum mrp_event event;
  587. attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
  588. mrp_cb(skb)->mh->attrlen,
  589. mrp_cb(skb)->mh->attrtype);
  590. if (attr == NULL)
  591. return;
  592. switch (vaevent) {
  593. case MRP_VECATTR_EVENT_NEW:
  594. event = MRP_EVENT_R_NEW;
  595. break;
  596. case MRP_VECATTR_EVENT_JOIN_IN:
  597. event = MRP_EVENT_R_JOIN_IN;
  598. break;
  599. case MRP_VECATTR_EVENT_IN:
  600. event = MRP_EVENT_R_IN;
  601. break;
  602. case MRP_VECATTR_EVENT_JOIN_MT:
  603. event = MRP_EVENT_R_JOIN_MT;
  604. break;
  605. case MRP_VECATTR_EVENT_MT:
  606. event = MRP_EVENT_R_MT;
  607. break;
  608. case MRP_VECATTR_EVENT_LV:
  609. event = MRP_EVENT_R_LV;
  610. break;
  611. default:
  612. return;
  613. }
  614. mrp_attr_event(app, attr, event);
  615. }
/* Parse one VectorAttribute of the current message: read its header
 * and first attribute value, then unpack the events (packed three per
 * octet, base __MRP_VECATTR_EVENT_MAX) and feed each one to the
 * matching attribute's state machine, incrementing the attribute value
 * between events.  Returns -1 on truncated or malformed input, which
 * aborts parsing of the whole message.
 */
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	/* A set LeaveAll flag applies to the applicant's entire MAD,
	 * not just the attributes named in this vector.
	 */
	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    sizeof_field(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}
/* Parse one Message of the PDU: validate its header (attribute type
 * known to the application, non-zero attribute length), then parse
 * VectorAttributes until an EndMark or the end of the PDU.  Returns
 * -1 on malformed input.
 */
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		/* An EndMark terminates this message's vector list. */
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}
/* Packet handler for an MRP application's group-addressed frames.
 * Validates the PDU header, then walks its messages under the
 * applicant lock, feeding receive events into the attribute state
 * machines.  The skb is always consumed.
 */
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;

	/* No port or no applicant for this application: not for us. */
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		/* An EndMark here terminates the whole PDU. */
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}
  743. static int mrp_init_port(struct net_device *dev)
  744. {
  745. struct mrp_port *port;
  746. port = kzalloc(sizeof(*port), GFP_KERNEL);
  747. if (!port)
  748. return -ENOMEM;
  749. rcu_assign_pointer(dev->mrp_port, port);
  750. return 0;
  751. }
/* Drop the device's MRP port, but only once no application has an
 * applicant attached to it any more.  Called under RTNL; the port
 * itself is freed after an RCU grace period.
 */
static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}
/* Attach an applicant for application @appl to device @dev: create the
 * per-device port on first use, subscribe to the application's group
 * multicast address, publish the applicant and start the join and
 * periodic timers.  Called under RTNL.  On failure any port created
 * here is released again (mrp_release_port() only frees it when no
 * other applicant uses it).
 */
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	app->active = true;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	/* Publish the applicant before arming the timers. */
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);

	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);
/* Detach and destroy the applicant for @appl on @dev.  The applicant
 * is first unpublished and marked inactive so the timers stop
 * rearming, then the timers are shut down, pending state is flushed
 * with a final TX event, and any remaining PDUs are transmitted before
 * the structure is freed (RCU-deferred).  Called under RTNL.
 */
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	spin_lock_bh(&app->lock);
	app->active = false;
	spin_unlock_bh(&app->lock);
	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	timer_shutdown_sync(&app->join_timer);
	timer_shutdown_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_attr_destroy_all(app);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
/* Register an MRP application: hook up its packet type so that frames
 * addressed to its group reach mrp_rcv().  Always succeeds.
 */
int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);
/* Unregister an MRP application and stop receiving its frames. */
void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);