sch_ingress.c 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374
// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 * Authors: Jamal Hadi Salim 1999
 */
  6. #include <linux/module.h>
  7. #include <linux/types.h>
  8. #include <linux/list.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/rtnetlink.h>
  11. #include <net/netlink.h>
  12. #include <net/pkt_sched.h>
  13. #include <net/pkt_cls.h>
  14. #include <net/tcx.h>
/* Per-qdisc private state for the "ingress" qdisc; lives in the Qdisc's
 * priv area (sized via .priv_size in ingress_qdisc_ops).
 */
struct ingress_sched_data {
	struct tcf_block *block;		/* classifier block acquired in ->init() */
	struct tcf_block_ext_info block_info;	/* binder type + chain-head-change hook */
	struct mini_Qdisc_pair miniqp;		/* miniq pair wired into the device tcx entry */
};
/* Ingress holds no child qdiscs: grafting/leaf lookup always yields NULL. */
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
/* Map a classid to a class handle. Any minor id is accepted; the +1
 * keeps the result non-zero, since 0 means "class not found".
 */
static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}
/* Binding a filter to a class uses the same lookup as ingress_find(). */
static unsigned long ingress_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
{
	return ingress_find(sch, classid);
}
/* Nothing to undo on unbind: ingress_bind_filter() takes no reference. */
static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}
/* No real classes to enumerate, so the walker is a no-op. */
static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}
  39. static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
  40. struct netlink_ext_ack *extack)
  41. {
  42. struct ingress_sched_data *q = qdisc_priv(sch);
  43. return q->block;
  44. }
  45. static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
  46. {
  47. struct mini_Qdisc_pair *miniqp = priv;
  48. mini_qdisc_pair_swap(miniqp, tp_head);
  49. };
/* Record a shared-block index requested from userspace; consumed later
 * by tcf_block_get_ext() through q->block_info.
 */
static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	q->block_info.block_index = block_index;
}
/* Report the shared-block index currently configured (0 if none). */
static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block_info.block_index;
}
/* Qdisc init: bump the static-key'd ingress counter, attach a miniq to
 * the device's ingress tcx entry, and acquire the classifier block.
 *
 * NOTE(review): intermediate failures (tcf_block_get_ext()) do not
 * unwind the earlier steps here; this presumably relies on the qdisc
 * core invoking ->destroy() when ->init() fails -- confirm against
 * qdisc_create().
 */
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry;
	bool created;
	int err;

	/* Only valid when grafted on the dedicated ingress parent. */
	if (sch->parent != TC_H_INGRESS)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();

	/* Fetch or create the device's tcx entry (true = ingress side). */
	entry = tcx_entry_fetch_or_create(dev, true, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_inc(entry);
	mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, true);

	/* Wire the block so chain-head changes swap our miniq. */
	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp, q->block);

	return 0;
}
/* Teardown mirror of ingress_init(): release the classifier block, drop
 * the miniq reference on the tcx entry (freeing the entry once inactive
 * -- presumably meaning no BPF programs remain attached either; confirm
 * tcx_entry_is_active() semantics in net/tcx.h), then decrement the
 * ingress counter.
 */
static void ingress_destroy(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress);

	/* Mirrors the ->init() guard: nothing was set up for other parents. */
	if (sch->parent != TC_H_INGRESS)
		return;

	tcf_block_put_ext(q->block, sch, &q->block_info);

	if (entry) {
		tcx_miniq_dec(entry);
		if (!tcx_entry_is_active(entry)) {
			tcx_entry_update(dev, NULL, true);
			tcx_entry_free(entry);
		}
	}

	net_dec_ingress_queue();
}
  104. static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
  105. {
  106. struct nlattr *nest;
  107. nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
  108. if (nest == NULL)
  109. goto nla_put_failure;
  110. return nla_nest_end(skb, nest);
  111. nla_put_failure:
  112. nla_nest_cancel(skb, nest);
  113. return -1;
  114. }
/* Class ops: ingress exposes a single pseudo-class, so most hooks are
 * stubs; filters reach the qdisc through .tcf_block.
 */
static const struct Qdisc_class_ops ingress_class_ops = {
	.flags		= QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf		= ingress_leaf,
	.find		= ingress_find,
	.walk		= ingress_walk,
	.tcf_block	= ingress_tcf_block,
	.bind_tcf	= ingress_bind_filter,
	.unbind_tcf	= ingress_unbind_filter,
};
/* Qdisc ops for "ingress": classification-only, per-CPU stats. */
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
	.cl_ops			= &ingress_class_ops,
	.id			= "ingress",
	.priv_size		= sizeof(struct ingress_sched_data),
	.static_flags		= TCQ_F_INGRESS | TCQ_F_CPUSTATS,
	.init			= ingress_init,
	.destroy		= ingress_destroy,
	.dump			= ingress_dump,
	.ingress_block_set	= ingress_ingress_block_set,
	.ingress_block_get	= ingress_ingress_block_get,
	.owner			= THIS_MODULE,
};

MODULE_ALIAS_NET_SCH("ingress");
/* Per-qdisc private state for "clsact": like ingress_sched_data but
 * doubled, covering both the ingress and the egress direction.
 */
struct clsact_sched_data {
	struct tcf_block *ingress_block;
	struct tcf_block *egress_block;
	struct tcf_block_ext_info ingress_block_info;
	struct tcf_block_ext_info egress_block_info;
	struct mini_Qdisc_pair miniqp_ingress;
	struct mini_Qdisc_pair miniqp_egress;
};
  145. static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
  146. {
  147. switch (TC_H_MIN(classid)) {
  148. case TC_H_MIN(TC_H_MIN_INGRESS):
  149. case TC_H_MIN(TC_H_MIN_EGRESS):
  150. return TC_H_MIN(classid);
  151. default:
  152. return 0;
  153. }
  154. }
/* Binding a filter validates the class the same way as clsact_find(). */
static unsigned long clsact_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return clsact_find(sch, classid);
}
  160. static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
  161. struct netlink_ext_ack *extack)
  162. {
  163. struct clsact_sched_data *q = qdisc_priv(sch);
  164. switch (cl) {
  165. case TC_H_MIN(TC_H_MIN_INGRESS):
  166. return q->ingress_block;
  167. case TC_H_MIN(TC_H_MIN_EGRESS):
  168. return q->egress_block;
  169. default:
  170. return NULL;
  171. }
  172. }
/* Record the shared-block index requested for the ingress direction. */
static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->ingress_block_info.block_index = block_index;
}
/* Record the shared-block index requested for the egress direction. */
static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->egress_block_info.block_index = block_index;
}
/* Report the ingress shared-block index (0 if none configured). */
static u32 clsact_ingress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->ingress_block_info.block_index;
}
/* Report the egress shared-block index (0 if none configured). */
static u32 clsact_egress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->egress_block_info.block_index;
}
/* Qdisc init for clsact: performs the ingress_init() sequence for BOTH
 * directions -- tcx entry + miniq pair + classifier block on the
 * device's ingress hook, then the same on the egress hook.
 *
 * NOTE(review): intermediate failures do not unwind earlier steps here;
 * this presumably relies on the qdisc core invoking ->destroy() when
 * ->init() fails -- confirm against qdisc_create().
 */
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry;
	bool created;
	int err;

	/* Only valid when grafted on the dedicated clsact parent. */
	if (sch->parent != TC_H_CLSACT)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();
	net_inc_egress_queue();

	/* Ingress side (second argument true = ingress). */
	entry = tcx_entry_fetch_or_create(dev, true, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_inc(entry);
	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, true);

	q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
				extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);

	/* Egress side (false = egress). */
	entry = tcx_entry_fetch_or_create(dev, false, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_inc(entry);
	mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
	if (created)
		tcx_entry_update(dev, entry, false);

	q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
	q->egress_block_info.chain_head_change = clsact_chain_head_change;
	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}
/* Teardown mirror of clsact_init(): release both classifier blocks,
 * then drop the miniq references on the ingress and egress tcx entries,
 * freeing each entry once it is no longer active (presumably meaning no
 * BPF programs remain attached either -- confirm tcx_entry_is_active()
 * semantics in net/tcx.h).
 */
static void clsact_destroy(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress);
	struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress);

	/* Mirrors the ->init() guard: nothing was set up for other parents. */
	if (sch->parent != TC_H_CLSACT)
		return;

	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);

	if (ingress_entry) {
		tcx_miniq_dec(ingress_entry);
		if (!tcx_entry_is_active(ingress_entry)) {
			tcx_entry_update(dev, NULL, true);
			tcx_entry_free(ingress_entry);
		}
	}
	if (egress_entry) {
		tcx_miniq_dec(egress_entry);
		if (!tcx_entry_is_active(egress_entry)) {
			tcx_entry_update(dev, NULL, false);
			tcx_entry_free(egress_entry);
		}
	}

	net_dec_ingress_queue();
	net_dec_egress_queue();
}
/* Class ops for clsact: reuses the ingress stubs, but with its own
 * find/bind/tcf_block hooks that distinguish the two pseudo-classes.
 */
static const struct Qdisc_class_ops clsact_class_ops = {
	.flags		= QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf		= ingress_leaf,
	.find		= clsact_find,
	.walk		= ingress_walk,
	.tcf_block	= clsact_tcf_block,
	.bind_tcf	= clsact_bind_filter,
	.unbind_tcf	= ingress_unbind_filter,
};
/* Qdisc ops for "clsact": like ingress but with per-direction block
 * set/get hooks; shares ingress_dump since neither carries options.
 */
static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
	.cl_ops			= &clsact_class_ops,
	.id			= "clsact",
	.priv_size		= sizeof(struct clsact_sched_data),
	.static_flags		= TCQ_F_INGRESS | TCQ_F_CPUSTATS,
	.init			= clsact_init,
	.destroy		= clsact_destroy,
	.dump			= ingress_dump,
	.ingress_block_set	= clsact_ingress_block_set,
	.egress_block_set	= clsact_egress_block_set,
	.ingress_block_get	= clsact_ingress_block_get,
	.egress_block_get	= clsact_egress_block_get,
	.owner			= THIS_MODULE,
};

MODULE_ALIAS_NET_SCH("clsact");
  283. static int __init ingress_module_init(void)
  284. {
  285. int ret;
  286. ret = register_qdisc(&ingress_qdisc_ops);
  287. if (!ret) {
  288. ret = register_qdisc(&clsact_qdisc_ops);
  289. if (ret)
  290. unregister_qdisc(&ingress_qdisc_ops);
  291. }
  292. return ret;
  293. }
/* Module unload: unregister both qdisc ops (no ordering dependency
 * between the two calls is visible here).
 */
static void __exit ingress_module_exit(void)
{
	unregister_qdisc(&ingress_qdisc_ops);
	unregister_qdisc(&clsact_qdisc_ops);
}
/* Module entry points and metadata. */
module_init(ingress_module_init);
module_exit(ingress_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");