sch_fq_pie.c

// SPDX-License-Identifier: GPL-2.0-only
/* Flow Queue PIE discipline
 *
 * Copyright (C) 2019 Mohit P. Tahiliani <tahiliani@nitk.edu.in>
 * Copyright (C) 2019 Sachin D. Patil <sdp.sachin@gmail.com>
 * Copyright (C) 2019 V. Saicharan <vsaicharan1998@gmail.com>
 * Copyright (C) 2019 Mohit Bhasi <mohitbhasi1998@gmail.com>
 * Copyright (C) 2019 Leslie Monis <lesliemonis@gmail.com>
 * Copyright (C) 2019 Gautam Ramakrishnan <gautamramk@gmail.com>
 */

#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pie.h>

/* Flow Queue PIE
 *
 * Principles:
 *   - Packets are classified on flows.
 *   - This is a Stochastic model (as we use a hash, several flows might
 *     be hashed to the same slot)
 *   - Each flow has a PIE managed queue.
 *   - Flows are linked onto two (Round Robin) lists,
 *     so that new flows have priority on old ones.
 *   - For a given flow, packets are not reordered.
 *   - Drops during enqueue only.
 *   - ECN capability is off by default.
 *   - ECN threshold (if ECN is enabled) is at 10% by default.
 *   - Uses timestamps to calculate queue delay by default.
 */
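
/* Example configuration (a sketch only; assumes an iproute2 "tc" build with
 * fq_pie support, where the option names map onto the TCA_FQ_PIE_*
 * attributes handled below):
 *
 *   tc qdisc add dev eth0 root fq_pie limit 10240 flows 1024 \
 *           target 15ms tupdate 15ms alpha 2 beta 20 ecn
 *
 * Options left out fall back to the defaults set in fq_pie_init() and
 * pie_params_init().
 */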

/**
 * struct fq_pie_flow - contains data for each flow
 * @vars: pie vars associated with the flow
 * @deficit: number of remaining byte credits
 * @backlog: size of data in the flow
 * @qlen: number of packets in the flow
 * @flowchain: flowchain for the flow
 * @head: first packet in the flow
 * @tail: last packet in the flow
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};

struct fq_pie_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_pie_flow *flows;
	struct Qdisc *sch;
	struct list_head old_flows;
	struct list_head new_flows;
	struct pie_params p_params;
	u32 ecn_prob;
	u32 flows_cnt;
	u32 flows_cursor;
	u32 quantum;
	u32 memory_limit;
	u32 new_flow_count;
	u32 memory_usage;
	u32 overmemory;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
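
/* Map an skb to a flow index in [1, flows_cnt]. skb->priority can select a
 * flow directly when its major number matches this qdisc's handle; otherwise
 * an attached tcf filter (if any) decides, and the hash-based fq_pie_hash()
 * is the fallback. A return value of 0 tells the enqueue path to drop.
 */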

static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_pie_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
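
/* Enqueue path: classify the skb into a flow, let PIE decide between early
 * drop, ECN marking (when enabled and the drop probability is below
 * ecn_prob percent) and a plain enqueue, then append the skb to the flow and
 * put a previously idle flow on the new_flows list with a full quantum of
 * byte credits.
 */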

static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct fq_pie_flow *sel_flow;
	int ret;
	u8 memory_limited = false;
	u8 enqueue = false;
	u32 pkt_len;
	u32 idx;

	/* Classifies packet into corresponding flow */
	idx = fq_pie_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	sel_flow = &q->flows[idx];
	/* Checks whether adding a new packet would exceed memory limit */
	get_pie_cb(skb)->mem_usage = skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;

	/* Checks if the qdisc is full */
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	} else if (unlikely(memory_limited)) {
		q->overmemory++;
	}

	if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
			    sel_flow->backlog, skb->len)) {
		enqueue = true;
	} else if (q->p_params.ecn &&
		   sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than the parameter ecn_prob, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->p_params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		pkt_len = qdisc_pkt_len(skb);
		q->stats.packets_in++;
		q->memory_usage += skb->truesize;
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
		flow_queue_add(sel_flow, skb);
		if (list_empty(&sel_flow->flowchain)) {
			list_add_tail(&sel_flow->flowchain, &q->new_flows);
			q->new_flow_count++;
			sel_flow->deficit = q->quantum;
			sel_flow->qlen = 0;
			sel_flow->backlog = 0;
		}
		sel_flow->qlen++;
		sel_flow->backlog += pkt_len;
		return NET_XMIT_SUCCESS;
	}
out:
	q->stats.dropped++;
	sel_flow->vars.accu_prob = 0;
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);
	return NET_XMIT_CN;
}

static const struct netlink_range_validation fq_pie_q_range = {
	.min = 1,
	.max = 1 << 20,
};

static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	[TCA_FQ_PIE_LIMIT]		= {.type = NLA_U32},
	[TCA_FQ_PIE_FLOWS]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TARGET]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TUPDATE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ALPHA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BETA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_QUANTUM]		=
			NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
	[TCA_FQ_PIE_MEMORY_LIMIT]	= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN_PROB]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BYTEMODE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]	= {.type = NLA_U32},
};

static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}
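
/* Dequeue path: deficit round robin over the two flow lists. New flows are
 * served before old ones; a flow whose byte deficit is exhausted gets a
 * fresh quantum and is rotated to the tail of old_flows, and a flow that
 * turns out to be empty is either rotated once through old_flows (to avoid
 * starving them) or removed from the schedule entirely.
 */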

static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct fq_pie_flow *flow;
	struct list_head *head;
	u32 pkt_len;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_pie_flow, flowchain);
	/* Flow has exhausted all its credits */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
		qdisc_bstats_update(sch, skb);
	}

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}
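
/* Apply netlink attributes under the qdisc tree lock. fq_pie_change() is
 * used both for the initial configuration (via fq_pie_init()) and for later
 * change requests; the flow count can only be set before the flow table has
 * been allocated, and if the packet limit shrinks the excess packets are
 * dequeued and dropped here.
 */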

static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
	unsigned int len_dropped = 0;
	unsigned int num_dropped = 0;
	int err;

	err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
	if (err < 0)
		return err;

	sch_tree_lock(sch);
	if (tb[TCA_FQ_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

		WRITE_ONCE(q->p_params.limit, limit);
		WRITE_ONCE(sch->limit, limit);
	}
	if (tb[TCA_FQ_PIE_FLOWS]) {
		if (q->flows) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows cannot be changed");
			goto flow_error;
		}
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows must range in [1..65536]");
			goto flow_error;
		}
	}

	/* convert from microseconds to pschedtime */
	if (tb[TCA_FQ_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

		/* convert to pschedtime */
		WRITE_ONCE(q->p_params.target,
			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
	}

	/* tupdate is in jiffies */
	if (tb[TCA_FQ_PIE_TUPDATE])
		WRITE_ONCE(q->p_params.tupdate,
			   usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE])));

	if (tb[TCA_FQ_PIE_ALPHA])
		WRITE_ONCE(q->p_params.alpha,
			   nla_get_u32(tb[TCA_FQ_PIE_ALPHA]));

	if (tb[TCA_FQ_PIE_BETA])
		WRITE_ONCE(q->p_params.beta,
			   nla_get_u32(tb[TCA_FQ_PIE_BETA]));

	if (tb[TCA_FQ_PIE_QUANTUM])
		WRITE_ONCE(q->quantum, nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]));

	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
		WRITE_ONCE(q->memory_limit,
			   nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]));

	if (tb[TCA_FQ_PIE_ECN_PROB])
		WRITE_ONCE(q->ecn_prob,
			   nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]));

	if (tb[TCA_FQ_PIE_ECN])
		WRITE_ONCE(q->p_params.ecn,
			   nla_get_u32(tb[TCA_FQ_PIE_ECN]));

	if (tb[TCA_FQ_PIE_BYTEMODE])
		WRITE_ONCE(q->p_params.bytemode,
			   nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]));

	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
		WRITE_ONCE(q->p_params.dq_rate_estimator,
			   nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]));

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

		len_dropped += qdisc_pkt_len(skb);
		num_dropped += 1;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);

	sch_tree_unlock(sch);
	return 0;

flow_error:
	sch_tree_unlock(sch);
	return -EINVAL;
}
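
/* Periodic PIE probability update. The work is amortized: at most 2048
 * flows are processed per timer run, and the timer only waits a full
 * tupdate interval again once the cursor has wrapped past the last flow.
 */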

static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	unsigned long next, tupdate;
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
	int max_cnt, i;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* Limit this expensive loop to 2048 flows per round. */
	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
	for (i = 0; i < max_cnt; i++) {
		pie_calculate_probability(&q->p_params,
					  &q->flows[q->flows_cursor].vars,
					  q->flows[q->flows_cursor].backlog);
		q->flows_cursor++;
	}

	tupdate = q->p_params.tupdate;
	next = 0;
	if (q->flows_cursor >= q->flows_cnt) {
		q->flows_cursor = 0;
		next = tupdate;
	}
	if (tupdate)
		mod_timer(&q->adapt_timer, jiffies + next);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u32 idx;

	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	if (opt) {
		err = fq_pie_change(sch, opt, extack);
		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;

init_failure:
	q->flows_cnt = 0;

	return err;
}
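
/* Report the current configuration back over netlink. This mirrors the
 * attributes accepted by fq_pie_change(); target is converted from psched
 * ticks back to microseconds and tupdate from jiffies back to microseconds.
 */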

static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, READ_ONCE(q->flows_cnt)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(READ_ONCE(q->p_params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(READ_ONCE(q->p_params.tupdate))) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, READ_ONCE(q->p_params.alpha)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, READ_ONCE(q->p_params.beta)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT,
			READ_ONCE(q->memory_limit)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, READ_ONCE(q->ecn_prob)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, READ_ONCE(q->p_params.ecn)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, READ_ONCE(q->p_params.bytemode)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			READ_ONCE(q->p_params.dq_rate_estimator)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in = q->stats.packets_in,
		.overlimit = q->stats.overlimit,
		.overmemory = q->overmemory,
		.dropped = q->stats.dropped,
		.ecn_mark = q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage = q->memory_usage,
	};
	struct list_head *pos;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u32 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}
}

static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->p_params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
	kvfree(q->flows);
}

static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id		= "fq_pie",
	.priv_size	= sizeof(struct fq_pie_sched_data),
	.enqueue	= fq_pie_qdisc_enqueue,
	.dequeue	= fq_pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_pie_init,
	.destroy	= fq_pie_destroy,
	.reset		= fq_pie_reset,
	.change		= fq_pie_change,
	.dump		= fq_pie_dump,
	.dump_stats	= fq_pie_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq_pie");

static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}

module_init(fq_pie_module_init);
module_exit(fq_pie_module_exit);

MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");
MODULE_AUTHOR("Mohit P. Tahiliani");
MODULE_LICENSE("GPL");