// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/inet_dscp.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <linux/indirect_call_wrapper.h>
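
/* INDIRECT_CALL_MT() wraps the retpoline-avoidance helpers from
 * <linux/indirect_call_wrapper.h> so they also work when only one (or
 * neither) of the IPv4/IPv6 policy-routing backends is built in: with both
 * enabled it tries the IPv6 candidate (f2) and then the IPv4 one (f1)
 * before falling back to an indirect call, with only one enabled it tries
 * just that candidate, and with neither it degenerates to a direct call.
 */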
#if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES)
#ifdef CONFIG_IP_MULTIPLE_TABLES
#define INDIRECT_CALL_MT(f, f2, f1, ...) \
	INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif
#elif defined(CONFIG_IP_MULTIPLE_TABLES)
#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_MT(f, f2, f1, ...) f(__VA_ARGS__)
#endif

static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};
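
/* A rule "matches all" when it carries no match conditions at all, i.e. it
 * selects every packet.  Callers can use this, for example, to recognize
 * that only the catch-all default rules are installed and skip per-rule
 * processing.
 */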
bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (READ_ONCE(rule->iifindex) || READ_ONCE(rule->oifindex) ||
	    rule->mark || rule->tun_id || rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	if (fib_rule_port_range_set(&rule->sport_range))
		return false;
	if (fib_rule_port_range_set(&rule->dport_range))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);
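
/* Protocols call fib_default_rule_add() while setting up their rules_ops to
 * install the initial catch-all rules.  A sketch of the IPv4 case (see
 * fib4_rules_init() in net/ipv4/fib_rules.c):
 *
 *	err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL);
 *	err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN);
 *	err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT);
 *
 * which yields the familiar default policy shown by `ip rule show`.
 */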
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->proto = RTPROT_KERNEL;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
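
/* When userspace does not supply FRA_PRIORITY, pick a default preference:
 * if at least two rules exist and the second one has a non-zero preference,
 * slot the new rule just in front of it (pref - 1); otherwise use 0.  This
 * keeps rules added without an explicit priority ahead of the pre-installed
 * defaults while leaving the first rule untouched.
 */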
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
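
/* lookup_rules_ops() returns the ops registered for @family, taking a
 * reference on the owning module; callers must drop it again with
 * rules_ops_put() once they are done with the ops.
 */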
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
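
/* A protocol registers its rules backend by passing a template ops struct.
 * A rough sketch of such a template (the IPv4 one lives in
 * net/ipv4/fib_rules.c; fields abbreviated here for illustration):
 *
 *	static const struct fib_rules_ops fib4_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct fib4_rule),
 *		.addr_size	= sizeof(u32),
 *		.action		= fib4_rule_action,
 *		.match		= fib4_rule_match,
 *		.configure	= fib4_rule_configure,
 *		.compare	= fib4_rule_compare,
 *		.fill		= fib4_rule_fill,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * fib_rules_register() duplicates the template per net namespace, so the
 * template itself can stay const and shared.
 */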
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

static int nla_get_port_range(struct nlattr *pattr,
			      struct fib_rule_port_range *port_range)
{
	const struct fib_rule_port_range *pr = nla_data(pattr);

	if (!fib_rule_port_range_valid(pr))
		return -EINVAL;

	port_range->start = pr->start;
	port_range->end = pr->end;

	return 0;
}

static int nla_put_port_range(struct sk_buff *skb, int attrtype,
			      struct fib_rule_port_range *range)
{
	return nla_put(skb, attrtype, sizeof(*range), range);
}
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int iifindex, oifindex, ret = 0;

	iifindex = READ_ONCE(rule->iifindex);
	if (iifindex && (iifindex != fl->flowi_iif))
		goto out;

	oifindex = READ_ONCE(rule->oifindex);
	if (oifindex && (oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = INDIRECT_CALL_MT(ops->match,
			       fib6_rule_match,
			       fib4_rule_match,
			       rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
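
/* Walk the rules in preference order under RCU until one both matches the
 * flow and yields a result: FR_ACT_GOTO jumps to its resolved target,
 * FR_ACT_NOP is skipped, and any other action is dispatched to the
 * family-specific handler, whose -EAGAIN return means "keep looking".
 * A matching ->suppress() veto also sends the walk on to the next rule.
 */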
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = INDIRECT_CALL_MT(ops->action,
					       fib6_rule_action,
					       fib4_rule_action,
					       rule, fl, flags, arg);

		if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
							      fib6_rule_suppress,
							      fib4_rule_suppress,
							      rule, flags, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(refcount_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
static int call_fib_rule_notifier(struct notifier_block *nb,
				  enum fib_event_type event_type,
				  struct fib_rule *rule, int family,
				  struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = family,
		.info.extack = extack,
		.rule = rule,
	};

	return call_fib_notifier(nb, event_type, &info.info);
}

static int call_fib_rule_notifiers(struct net *net,
				   enum fib_event_type event_type,
				   struct fib_rule *rule,
				   struct fib_rules_ops *ops,
				   struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = ops->family,
		.info.extack = extack,
		.rule = rule,
	};

	ops->fib_rules_seq++;
	return call_fib_notifiers(net, event_type, &info.info);
}

/* Called with rcu_read_lock() */
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
		   struct netlink_ext_ack *extack)
{
	struct fib_rules_ops *ops;
	struct fib_rule *rule;
	int err = 0;

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return -EAFNOSUPPORT;
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		err = call_fib_rule_notifier(nb, FIB_EVENT_RULE_ADD,
					     rule, family, extack);
		if (err)
			break;
	}
	rules_ops_put(ops);

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);

unsigned int fib_rules_seq_read(struct net *net, int family)
{
	unsigned int fib_rules_seq;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return 0;
	fib_rules_seq = ops->fib_rules_seq;
	rules_ops_put(ops);

	return fib_rules_seq;
}
EXPORT_SYMBOL_GPL(fib_rules_seq_read);
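
/* rule_find() is the lookup used by RTM_DELRULE: it compares only the
 * attributes userspace actually supplied, so `ip rule del` may name any
 * subset of a rule's fields.  Contrast rule_exists() below, used for
 * NLM_F_EXCL on add, which requires every field to match exactly.
 */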
static struct fib_rule *rule_find(struct fib_rules_ops *ops,
				  struct fib_rule_hdr *frh,
				  struct nlattr **tb,
				  struct fib_rule *rule,
				  bool user_priority)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (rule->action && r->action != rule->action)
			continue;

		if (rule->table && r->table != rule->table)
			continue;

		if (user_priority && r->pref != rule->pref)
			continue;

		if (rule->iifname[0] &&
		    memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (rule->oifname[0] &&
		    memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (rule->mark && r->mark != rule->mark)
			continue;

		if (rule->suppress_ifgroup != -1 &&
		    r->suppress_ifgroup != rule->suppress_ifgroup)
			continue;

		if (rule->suppress_prefixlen != -1 &&
		    r->suppress_prefixlen != rule->suppress_prefixlen)
			continue;

		if (rule->mark_mask && r->mark_mask != rule->mark_mask)
			continue;

		if (rule->tun_id && r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (rule->l3mdev && r->l3mdev != rule->l3mdev)
			continue;

		if (uid_range_set(&rule->uid_range) &&
		    (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		     !uid_eq(r->uid_range.end, rule->uid_range.end)))
			continue;

		if (rule->ip_proto && r->ip_proto != rule->ip_proto)
			continue;

		if (rule->proto && r->proto != rule->proto)
			continue;

		if (fib_rule_port_range_set(&rule->sport_range) &&
		    !fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (fib_rule_port_range_set(&rule->dport_range) &&
		    !fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return r;
	}

	return NULL;
}

#ifdef CONFIG_NET_L3_MASTER_DEV
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	nlrule->l3mdev = nla_get_u8(nla);
	if (nlrule->l3mdev != 1) {
		NL_SET_ERR_MSG(extack, "Invalid l3mdev attribute");
		return -1;
	}

	return 0;
}
#else
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "l3mdev support is not enabled in kernel");
	return -1;
}
#endif

static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct netlink_ext_ack *extack,
		       struct fib_rules_ops *ops,
		       struct nlattr *tb[],
		       struct fib_rule **rule,
		       bool *user_priority)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rule *nlrule = NULL;
	int err = -EINVAL;

	if (frh->src_len)
		if (!tb[FRA_SRC] ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			goto errout;
		}

	if (frh->dst_len)
		if (!tb[FRA_DST] ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid dst address");
			goto errout;
		}

	nlrule = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
	if (!nlrule) {
		err = -ENOMEM;
		goto errout;
	}
	refcount_set(&nlrule->refcnt, 1);
	nlrule->fr_net = net;

	if (tb[FRA_PRIORITY]) {
		nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
		*user_priority = true;
	} else {
		nlrule->pref = fib_default_rule_pref(ops);
	}

	nlrule->proto = tb[FRA_PROTOCOL] ?
		nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		nlrule->iifindex = -1;
		nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, nlrule->iifname);
		if (dev)
			nlrule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		nlrule->oifindex = -1;
		nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, nlrule->oifname);
		if (dev)
			nlrule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (nlrule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			nlrule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	if (tb[FRA_L3MDEV] &&
	    fib_nl2rule_l3mdev(tb[FRA_L3MDEV], nlrule, extack) < 0)
		goto errout_free;

	nlrule->action = frh->action;
	nlrule->flags = frh->flags;
	nlrule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		nlrule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		nlrule->suppress_ifgroup = -1;

	if (tb[FRA_GOTO]) {
		if (nlrule->action != FR_ACT_GOTO) {
			NL_SET_ERR_MSG(extack, "Unexpected goto");
			goto errout_free;
		}

		nlrule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (nlrule->target <= nlrule->pref) {
			NL_SET_ERR_MSG(extack, "Backward goto not supported");
			goto errout_free;
		}
	} else if (nlrule->action == FR_ACT_GOTO) {
		NL_SET_ERR_MSG(extack, "Missing goto target for action goto");
		goto errout_free;
	}

	if (nlrule->l3mdev && nlrule->table) {
		NL_SET_ERR_MSG(extack, "l3mdev and table are mutually exclusive");
		goto errout_free;
	}

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			NL_SET_ERR_MSG(extack, "No permission to set uid");
			goto errout_free;
		}

		nlrule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&nlrule->uid_range) ||
		    !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) {
			NL_SET_ERR_MSG(extack, "Invalid uid range");
			goto errout_free;
		}
	} else {
		nlrule->uid_range = fib_kuid_range_unset;
	}

	if (tb[FRA_IP_PROTO])
		nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);

	if (tb[FRA_SPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_SPORT_RANGE],
					 &nlrule->sport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid sport range");
			goto errout_free;
		}
	}

	if (tb[FRA_DPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_DPORT_RANGE],
					 &nlrule->dport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid dport range");
			goto errout_free;
		}
	}

	*rule = nlrule;

	return 0;

errout_free:
	kfree(nlrule);
errout:
	return err;
}

static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->suppress_ifgroup != rule->suppress_ifgroup)
			continue;

		if (r->suppress_prefixlen != rule->suppress_prefixlen)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (r->ip_proto != rule->ip_proto)
			continue;

		if (r->proto != rule->proto)
			continue;

		if (!fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (!fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return 1;
	}
	return 0;
}
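
/* Netlink attribute policy for RTM_NEWRULE/RTM_DELRULE.  The
 * strict_start_type entry means attributes numbered above FRA_DPORT_RANGE
 * (e.g. FRA_DSCP) are validated strictly even on the deprecated parsing
 * path, while the older attributes keep their lenient legacy handling.
 */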
static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = {
	[FRA_UNSPEC]	= { .strict_start_type = FRA_DPORT_RANGE + 1 },
	[FRA_IIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[FRA_OIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[FRA_PRIORITY]	= { .type = NLA_U32 },
	[FRA_FWMARK]	= { .type = NLA_U32 },
	[FRA_FLOW]	= { .type = NLA_U32 },
	[FRA_TUN_ID]	= { .type = NLA_U64 },
	[FRA_FWMASK]	= { .type = NLA_U32 },
	[FRA_TABLE]	= { .type = NLA_U32 },
	[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 },
	[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 },
	[FRA_GOTO]	= { .type = NLA_U32 },
	[FRA_L3MDEV]	= { .type = NLA_U8 },
	[FRA_UID_RANGE]	= { .len = sizeof(struct fib_rule_uid_range) },
	[FRA_PROTOCOL]	= { .type = NLA_U8 },
	[FRA_IP_PROTO]	= { .type = NLA_U8 },
	[FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
	[FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
	[FRA_DSCP]	= NLA_POLICY_MAX(NLA_U8, INET_DSCP_MASK >> 2),
};
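
/* RTM_NEWRULE handler.  For example, `ip rule add from 10.0.0.0/8 lookup
 * 100 pref 1000` arrives here as a fib_rule_hdr plus FRA_SRC, FRA_TABLE
 * and FRA_PRIORITY attributes; the rule is validated, checked against
 * NLM_F_EXCL, configured by the family backend, linked into the list in
 * preference order, and finally announced via notifiers and rtnetlink.
 */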
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule = NULL, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX + 1];
	int err = -EINVAL, unresolved = 0;
	bool user_priority = false;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (!ops) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
				     fib_rule_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority);
	if (err)
		goto errout;

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb, extack);
	if (err < 0)
		goto errout_free;

	err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
				      extack);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref == rule->target) {
			RCU_INIT_POINTER(rule->ctarget, r);
			break;
		}
	}

	if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
		unresolved = 1;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule = NULL, *r, *nlrule = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;
	bool user_priority = false;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
				     fib_rule_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority);
	if (err)
		goto errout;

	rule = rule_find(ops, frh, tb, nlrule, user_priority);
	if (!rule) {
		err = -ENOENT;
		goto errout;
	}

	if (rule->flags & FIB_RULE_PERMANENT) {
		err = -EPERM;
		goto errout;
	}

	if (ops->delete) {
		err = ops->delete(rule);
		if (err)
			goto errout;
	}

	if (rule->tun_id)
		ip_tunnel_unneed_metadata();

	list_del_rcu(&rule->list);

	if (rule->action == FR_ACT_GOTO) {
		ops->nr_goto_rules--;
		if (rtnl_dereference(rule->ctarget) == NULL)
			ops->unresolved_rules--;
	}

	/*
	 * Check if this rule is the target of any goto rule.  If so,
	 * re-point those rules at the next rule with the same preference,
	 * or mark them unresolved.  As this can be expensive, it is only
	 * done when goto rules (other than the one being deleted, if it
	 * is one) actually exist.
	 */
	if (ops->nr_goto_rules > 0) {
		struct fib_rule *n;

		n = list_next_entry(rule, list);
		if (&n->list == &ops->rules_list || n->pref != rule->pref)
			n = NULL;
		list_for_each_entry(r, &ops->rules_list, list) {
			if (rtnl_dereference(r->ctarget) != rule)
				continue;
			rcu_assign_pointer(r->ctarget, n);
			if (!n)
				ops->unresolved_rules++;
		}
	}

	call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
				NULL);
	notify_rule_change(RTM_DELRULE, rule, ops, nlh,
			   NETLINK_CB(skb).portid);
	fib_rule_put(rule);
	flush_route_cache(ops);
	rules_ops_put(ops);
	kfree(nlrule);
	return 0;

errout:
	kfree(nlrule);
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range))
			 + nla_total_size(1) /* FRA_PROTOCOL */
			 + nla_total_size(1) /* FRA_IP_PROTO */
			 + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */
			 + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;
	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
		goto nla_put_failure;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (READ_ONCE(rule->iifindex) == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (READ_ONCE(rule->oifindex) == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)) ||
	    (fib_rule_port_range_set(&rule->sport_range) &&
	     nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
	    (fib_rule_port_range_set(&rule->dport_range) &&
	     nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
	    (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}

static int fib_valid_dumprule_req(const struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct fib_rule_hdr *frh;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request");
		return -EINVAL;
	}

	frh = nlmsg_data(nlh);
	if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
	    frh->res1 || frh->res2 || frh->action || frh->flags) {
		NL_SET_ERR_MSG(extack,
			       "Invalid values in header for fib rule dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request");
		return -EINVAL;
	}

	return 0;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int err, idx = 0, family;

	if (cb->strict_check) {
		err = fib_valid_dumprule_req(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = rtnl_msg_family(nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	err = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		err = dump_rules(skb, cb, ops);
		if (err < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return err;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOMEM;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(net, ops->nlgroup, err);
}
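
/* Rules cache the ifindex of their iif/oif names so the fast path does not
 * do name lookups.  The netdevice notifier below keeps that cache coherent:
 * a rule whose interface is absent carries index -1 (and is reported with
 * FIB_RULE_{IIF,OIF}_DETACHED) until a device with the matching name shows
 * up again.
 */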
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			WRITE_ONCE(rule->iifindex, dev->ifindex);
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			WRITE_ONCE(rule->oifindex, dev->ifindex);
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			WRITE_ONCE(rule->iifindex, -1);
		if (rule->oifindex == dev->ifindex)
			WRITE_ONCE(rule->oifindex, -1);
	}
}
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static void __net_exit fib_rules_net_exit(struct net *net)
{
	WARN_ON_ONCE(!list_empty(&net->rules_ops));
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
	.exit = fib_rules_net_exit,
};

static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule,
		      RTNL_FLAG_DUMP_UNLOCKED);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);