flow_offload.c

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}
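
/* Illustrative sketch (not part of this file): a caller allocates the rule
 * sized for its action count and may override the DONT_CARE default per
 * action afterwards. The tc_build_rule() name below is hypothetical; the
 * FLOW_ACTION_HW_STATS_* values come from <net/flow_offload.h>.
 *
 *	static struct flow_rule *tc_build_rule(unsigned int num_actions)
 *	{
 *		struct flow_rule *rule = flow_rule_alloc(num_actions);
 *
 *		if (!rule)
 *			return NULL;
 *		// ask for delayed (batched) counters on the first action only
 *		rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_DELAYED;
 *		return rule;
 *	}
 */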

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)

void flow_rule_match_meta(const struct flow_rule *rule,
		struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
		struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
		struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
		struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
		struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
		struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_arp(const struct flow_rule *rule,
		struct flow_match_arp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out);
}
EXPORT_SYMBOL(flow_rule_match_arp);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
		struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
		struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
		struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
		struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
		struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);

void flow_rule_match_tcp(const struct flow_rule *rule,
		struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_ipsec(const struct flow_rule *rule,
		struct flow_match_ipsec *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out);
}
EXPORT_SYMBOL(flow_rule_match_ipsec);

void flow_rule_match_icmp(const struct flow_rule *rule,
		struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
		struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
		struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
		struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
		struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
		struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
		struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
		struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
		struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
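
/* Illustrative sketch (not part of this file): a driver's rule-parsing path
 * typically checks flow_rule_match_key() (from <net/flow_offload.h>) before
 * calling the matching helper, then applies the supplied mask. The names
 * foo_parse_rule() and struct foo_filter are hypothetical.
 *
 *	static int foo_parse_rule(struct flow_rule *rule, struct foo_filter *f)
 *	{
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *			struct flow_match_basic match;
 *
 *			flow_rule_match_basic(rule, &match);
 *			f->ip_proto = match.key->ip_proto & match.mask->ip_proto;
 *		}
 *
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
 *			struct flow_match_ipv4_addrs match;
 *
 *			flow_rule_match_ipv4_addrs(rule, &match);
 *			f->dst_ip = match.key->dst & match.mask->dst;
 *		}
 *		return 0;
 *	}
 */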

struct flow_action_cookie *flow_action_cookie_create(void *data,
		unsigned int len,
		gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;

	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
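
/* Illustrative sketch (not part of this file): the cookie is a length-prefixed
 * copy of caller-provided bytes, so create and destroy must be paired. Here
 * data/len are caller-owned values and cookie is a local; both are stand-ins.
 *
 *	struct flow_action_cookie *cookie;
 *
 *	cookie = flow_action_cookie_create(data, len, GFP_ATOMIC);
 *	if (!cookie)
 *		return -ENOMEM;
 *	...
 *	flow_action_cookie_destroy(cookie);
 */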

void flow_rule_match_ct(const struct flow_rule *rule,
		struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
		struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

void flow_rule_match_l2tpv3(const struct flow_rule *rule,
		struct flow_match_l2tpv3 *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out);
}
EXPORT_SYMBOL(flow_rule_match_l2tpv3);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
		void *cb_ident, void *cb_priv,
		void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
		flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
		struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
		struct list_head *driver_block_list,
		flow_setup_cb_t *cb,
		void *cb_ident, void *cb_priv,
		bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
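
/* Illustrative sketch (not part of this file): the simple setup helper is
 * intended to back a driver's TC_SETUP_BLOCK handling in a single call. The
 * names foo_setup_tc(), foo_setup_tc_cb(), foo_block_cb_list and
 * struct foo_priv are hypothetical.
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *				void *type_data)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (type != TC_SETUP_BLOCK)
 *			return -EOPNOTSUPP;
 *
 *		return flow_block_cb_setup_simple(type_data, &foo_block_cb_list,
 *						  foo_setup_tc_cb, priv, priv,
 *						  true);
 *	}
 */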

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
		void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
		void *cb_priv,
		struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
		void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
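
/* Illustrative sketch (not part of this file): a driver that wants indirect
 * blocks (for example on tunnel netdevices it does not own) registers one
 * callback for the whole driver, typically at probe/module-init time, and
 * unregisters it with a matching release hook on teardown. The names
 * foo_indr_setup_cb(), foo_indr_release() and foo_dev are hypothetical.
 *
 *	err = flow_indr_dev_register(foo_indr_setup_cb, foo_dev);
 *	if (err)
 *		return err;
 *	...
 *	flow_indr_dev_unregister(foo_indr_setup_cb, foo_dev, foo_indr_release);
 */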

static void flow_block_indr_init(struct flow_block_cb *flow_block,
		struct flow_block_offload *bo,
		struct net_device *dev, struct Qdisc *sch, void *data,
		void *cb_priv,
		void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
		void *cb_ident, void *cb_priv,
		void (*release)(void *cb_priv),
		struct flow_block_offload *bo,
		struct net_device *dev,
		struct Qdisc *sch, void *data,
		void *indr_cb_priv,
		void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
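
/* Illustrative sketch (not part of this file): inside a driver's
 * flow_indr_block_bind_cb_t callback, the BIND branch allocates the block_cb
 * with the indirect variant so the core can find it on flow_block_indr_list
 * and clean it up when the driver unregisters. foo_cb, foo_indr_block_unbind()
 * and driver_block_list are hypothetical; the other arguments are those the
 * callback received.
 *
 *	case FLOW_BLOCK_BIND:
 *		block_cb = flow_indr_block_cb_alloc(foo_cb, cb_priv, cb_priv,
 *						    foo_indr_block_unbind,
 *						    bo, netdev, sch, data,
 *						    indr_priv, cleanup);
 *		if (IS_ERR(block_cb))
 *			return PTR_ERR(block_cb);
 *		flow_block_cb_add(block_cb, bo);
 *		list_add_tail(&block_cb->driver_list, &driver_block_list);
 *		return 0;
 */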

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
		enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
		struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
		enum tc_setup_type type, void *data,
		struct flow_block_offload *bo,
		void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);

	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
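
/* Illustrative sketch (not part of this file): callers in the stack treat the
 * return value as the number of registered callbacks that accepted the block,
 * with -EOPNOTSUPP meaning no block_cb was installed for a non-NULL bo. The
 * block and cleanup_cb names below are hypothetical stand-ins for the caller's
 * own data pointer and cleanup hook.
 *
 *	count = flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block,
 *					    bo, cleanup_cb);
 *	if (count < 0)
 *		return count;
 */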

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);