sch_fifo.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fifo.c	The simplest FIFO queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* 1 band FIFO pseudo-"scheduler" */
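/*
 * bfifo: byte-limited FIFO.  A packet is accepted only if the queue
 * backlog in bytes, including this packet, stays within sch->limit;
 * otherwise it is dropped at the tail.
 */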
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
{
        if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
                   READ_ONCE(sch->limit)))
                return qdisc_enqueue_tail(skb, sch);

        return qdisc_drop(skb, sch, to_free);
}
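/*
 * pfifo: packet-limited FIFO.  The limit is a packet count; new packets
 * are dropped at the tail once sch->q.qlen reaches sch->limit.
 */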
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
{
        if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
                return qdisc_enqueue_tail(skb, sch);

        return qdisc_drop(skb, sch, to_free);
}
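/*
 * pfifo_head_drop: like pfifo, but when the queue is full the oldest
 * packet at the head is dropped to make room for the new one, and the
 * caller sees NET_XMIT_CN (congestion) rather than a drop of the new
 * packet.  A limit of zero drops every packet.
 */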
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                              struct sk_buff **to_free)
{
        unsigned int prev_backlog;

        if (unlikely(READ_ONCE(sch->limit) == 0))
                return qdisc_drop(skb, sch, to_free);

        if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
                return qdisc_enqueue_tail(skb, sch);

        prev_backlog = sch->qstats.backlog;
        /* queue full, remove one skb to fulfill the limit */
        __qdisc_queue_drop_head(sch, &sch->q, to_free);
        qdisc_qstats_drop(sch);
        qdisc_enqueue_tail(skb, sch);

        qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
        return NET_XMIT_CN;
}
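/*
 * Hardware offload: mirror replace/destroy/stats requests to the driver
 * through ndo_setup_tc(TC_SETUP_QDISC_FIFO) when the device advertises
 * TC offload support, so an offloaded FIFO tracks the software state.
 */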
static void fifo_offload_init(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct tc_fifo_qopt_offload qopt;

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return;

        qopt.command = TC_FIFO_REPLACE;
        qopt.handle = sch->handle;
        qopt.parent = sch->parent;
        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
}

static void fifo_offload_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct tc_fifo_qopt_offload qopt;

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return;

        qopt.command = TC_FIFO_DESTROY;
        qopt.handle = sch->handle;
        qopt.parent = sch->parent;
        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
}

static int fifo_offload_dump(struct Qdisc *sch)
{
        struct tc_fifo_qopt_offload qopt;

        qopt.command = TC_FIFO_STATS;
        qopt.handle = sch->handle;
        qopt.parent = sch->parent;
        qopt.stats.bstats = &sch->bstats;
        qopt.stats.qstats = &sch->qstats;

        return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
}
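/*
 * Common init/change path.  With no netlink options the limit defaults
 * to the device tx_queue_len (scaled to bytes by the interface MTU for
 * bfifo).  TCQ_F_CAN_BYPASS is set only when the limit is large enough
 * that an empty queue can never reject a packet.
 */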
static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        bool bypass;
        bool is_bfifo = sch->ops == &bfifo_qdisc_ops;

        if (opt == NULL) {
                u32 limit = qdisc_dev(sch)->tx_queue_len;

                if (is_bfifo)
                        limit *= psched_mtu(qdisc_dev(sch));

                WRITE_ONCE(sch->limit, limit);
        } else {
                struct tc_fifo_qopt *ctl = nla_data(opt);

                if (nla_len(opt) < sizeof(*ctl))
                        return -EINVAL;

                WRITE_ONCE(sch->limit, ctl->limit);
        }

        if (is_bfifo)
                bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
        else
                bypass = sch->limit >= 1;

        if (bypass)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;

        return 0;
}
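/*
 * pfifo and bfifo also program the hardware offload on init/change;
 * pfifo_head_drop has no offload path and only runs the common init.
 */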
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        int err;

        err = __fifo_init(sch, opt, extack);
        if (err)
                return err;

        fifo_offload_init(sch);
        return 0;
}

static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        return __fifo_init(sch, opt, extack);
}

static void fifo_destroy(struct Qdisc *sch)
{
        fifo_offload_destroy(sch);
}
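/*
 * Dump the configured limit as a struct tc_fifo_qopt in TCA_OPTIONS.
 * For pfifo/bfifo, fifo_dump() first lets qdisc_offload_dump_helper()
 * merge any hardware statistics into the software counters.
 */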
static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct tc_fifo_qopt opt = { .limit = READ_ONCE(sch->limit) };

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        return -1;
}

static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        int err;

        err = fifo_offload_dump(sch);
        if (err)
                return err;

        return __fifo_dump(sch, skb);
}

static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        return __fifo_dump(sch, skb);
}
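/*
 * The three qdiscs share the generic head dequeue/peek/reset helpers and
 * differ only in their enqueue policy and in whether offload is wired up.
 */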
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
        .id             = "pfifo",
        .priv_size      = 0,
        .enqueue        = pfifo_enqueue,
        .dequeue        = qdisc_dequeue_head,
        .peek           = qdisc_peek_head,
        .init           = fifo_init,
        .destroy        = fifo_destroy,
        .reset          = qdisc_reset_queue,
        .change         = fifo_init,
        .dump           = fifo_dump,
        .owner          = THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);

struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
        .id             = "bfifo",
        .priv_size      = 0,
        .enqueue        = bfifo_enqueue,
        .dequeue        = qdisc_dequeue_head,
        .peek           = qdisc_peek_head,
        .init           = fifo_init,
        .destroy        = fifo_destroy,
        .reset          = qdisc_reset_queue,
        .change         = fifo_init,
        .dump           = fifo_dump,
        .owner          = THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);

struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
        .id             = "pfifo_head_drop",
        .priv_size      = 0,
        .enqueue        = pfifo_tail_enqueue,
        .dequeue        = qdisc_dequeue_head,
        .peek           = qdisc_peek_head,
        .init           = fifo_hd_init,
        .reset          = qdisc_reset_queue,
        .change         = fifo_hd_init,
        .dump           = fifo_hd_dump,
        .owner          = THIS_MODULE,
};
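/*
 * Helpers for qdiscs that embed a default FIFO child (e.g. TBF):
 * fifo_set_limit() synthesizes a netlink attribute and calls the child's
 * ->change(), and fifo_create_dflt() allocates such a child with a given
 * limit.
 */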
/* Pass size change message down to embedded FIFO */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
        struct nlattr *nla;
        int ret = -ENOMEM;

        /* Hack to avoid sending change message to non-FIFO */
        if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
                return 0;

        if (!q->ops->change)
                return 0;

        nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
        if (nla) {
                nla->nla_type = RTM_NEWQDISC;
                nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
                ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

                ret = q->ops->change(q, nla, NULL);
                kfree(nla);
        }
        return ret;
}
EXPORT_SYMBOL(fifo_set_limit);
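/*
 * Allocate a FIFO qdisc of the given ops as a child of @sch and apply
 * @limit; returns the new qdisc, or an ERR_PTR() on failure.
 */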
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
                               unsigned int limit,
                               struct netlink_ext_ack *extack)
{
        struct Qdisc *q;
        int err = -ENOMEM;

        q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
                              extack);
        if (q) {
                err = fifo_set_limit(q, limit);
                if (err < 0) {
                        qdisc_put(q);
                        q = NULL;
                }
        }

        return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);
MODULE_DESCRIPTION("Single queue packet and byte based First In First Out (P/BFIFO) scheduler");