/* spectrum_mr_tcam.c */
  1. // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2. /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
  3. #include <linux/kernel.h>
  4. #include <linux/list.h>
  5. #include <linux/netdevice.h>
  6. #include "spectrum_mr_tcam.h"
  7. #include "reg.h"
  8. #include "spectrum.h"
  9. #include "core_acl_flex_actions.h"
  10. #include "spectrum_mr.h"
/* Per-instance wrapper state; @priv is opaque storage for the underlying
 * TCAM implementation, allocated with ops->priv_size in
 * mlxsw_sp_mr_tcam_init() and handed to every ops callback.
 */
struct mlxsw_sp_mr_tcam {
	void *priv;
};
/* This struct maps to one RIGR2 register entry */
struct mlxsw_sp_mr_erif_sublist {
	struct list_head list;		/* Node in erif_list->erif_sublists */
	u32 rigr2_kvdl_index;		/* KVDL index of the backing RIGR2 entry */
	int num_erifs;			/* Number of valid erif_indices[] slots */
	u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
	bool synced;			/* True once written to HW by _commit() */
};
/* A chain of RIGR2 sublists forming one egress RIF list; @kvdl_index is
 * the KVDL index of the first sublist, i.e. the head pointer given to
 * hardware via the mcrouter action.
 */
struct mlxsw_sp_mr_tcam_erif_list {
	struct list_head erif_sublists;
	u32 kvdl_index;
};
  26. static bool
  27. mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
  28. struct mlxsw_sp_mr_erif_sublist *erif_sublist)
  29. {
  30. int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  31. MC_ERIF_LIST_ENTRIES);
  32. return erif_sublist->num_erifs == erif_list_entries;
  33. }
/* Initialize an empty eRIF list; sublists are allocated lazily on add. */
static void
mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	INIT_LIST_HEAD(&erif_list->erif_sublists);
}
  39. static struct mlxsw_sp_mr_erif_sublist *
  40. mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
  41. struct mlxsw_sp_mr_tcam_erif_list *erif_list)
  42. {
  43. struct mlxsw_sp_mr_erif_sublist *erif_sublist;
  44. int err;
  45. erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
  46. if (!erif_sublist)
  47. return ERR_PTR(-ENOMEM);
  48. err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
  49. 1, &erif_sublist->rigr2_kvdl_index);
  50. if (err) {
  51. kfree(erif_sublist);
  52. return ERR_PTR(err);
  53. }
  54. list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
  55. return erif_sublist;
  56. }
  57. static void
  58. mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
  59. struct mlxsw_sp_mr_erif_sublist *erif_sublist)
  60. {
  61. list_del(&erif_sublist->list);
  62. mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
  63. 1, erif_sublist->rigr2_kvdl_index);
  64. kfree(erif_sublist);
  65. }
/* Append one eRIF to the tail of @erif_list, creating a new sublist
 * (RIGR2 entry) when the list is empty or the tail sublist is full.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			  u16 erif_index)
{
	struct mlxsw_sp_mr_erif_sublist *sublist;

	/* If either there is no erif_entry or the last one is full, allocate a
	 * new one.
	 */
	if (list_empty(&erif_list->erif_sublists)) {
		sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
		if (IS_ERR(sublist))
			return PTR_ERR(sublist);
		/* First sublist: its KVDL index becomes the head of the
		 * whole list as seen by hardware.
		 */
		erif_list->kvdl_index = sublist->rigr2_kvdl_index;
	} else {
		sublist = list_last_entry(&erif_list->erif_sublists,
					  struct mlxsw_sp_mr_erif_sublist,
					  list);
		/* The old tail will gain either a new eRIF or a next-pointer
		 * to a fresh sublist, so it must be re-written to hardware on
		 * the next commit.
		 */
		sublist->synced = false;
		if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
			sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
								  erif_list);
			if (IS_ERR(sublist))
				return PTR_ERR(sublist);
		}
	}

	/* Add the eRIF to the last entry's last index */
	sublist->erif_indices[sublist->num_erifs++] = erif_index;
	return 0;
}
  96. static void
  97. mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
  98. struct mlxsw_sp_mr_tcam_erif_list *erif_list)
  99. {
  100. struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;
  101. list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
  102. list)
  103. mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
  104. }
/* Write every not-yet-synced sublist of @erif_list to hardware via the
 * RIGR2 register. Sublists are chained in hardware: every non-tail entry
 * packs a valid next-pointer to its successor's KVDL index.
 * Returns 0 or the errno of a failed register write.
 */
static int
mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *curr_sublist;
	char rigr2_pl[MLXSW_REG_RIGR2_LEN];
	int err;
	int i;

	list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
		if (curr_sublist->synced)
			continue;

		/* The last sublist terminates the chain (no next-pointer);
		 * any other sublist packs the next sublist's KVDL index.
		 */
		if (list_is_last(&curr_sublist->list,
				 &erif_list->erif_sublists)) {
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     false, 0);
		} else {
			struct mlxsw_sp_mr_erif_sublist *next_sublist;

			next_sublist = list_next_entry(curr_sublist, list);
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     true,
					     next_sublist->rigr2_kvdl_index);
		}

		/* Pack all the erifs */
		for (i = 0; i < curr_sublist->num_erifs; i++) {
			u16 erif_index = curr_sublist->erif_indices[i];

			mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
							erif_index);
		}

		/* Write the entry */
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
				      rigr2_pl);
		if (err)
			/* No need of a rollback here because this
			 * hardware entry should not be pointed yet.
			 */
			return err;
		curr_sublist->synced = true;
	}
	return 0;
}
/* Transfer all sublists and the head KVDL index from @from to @to.
 * @from is left with a stale list head and must be re-initialized before
 * any further use.
 */
static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
				       struct mlxsw_sp_mr_tcam_erif_list *from)
{
	list_splice(&from->erif_sublists, &to->erif_sublists);
	to->kvdl_index = from->kvdl_index;
}
/* Software state for one multicast route programmed into the TCAM. */
struct mlxsw_sp_mr_tcam_route {
	struct mlxsw_sp_mr_tcam_erif_list erif_list; /* Egress RIF list */
	struct mlxsw_afa_block *afa_block;	/* Committed action block */
	u32 counter_index;			/* Flow counter for stats */
	enum mlxsw_sp_mr_route_action action;
	struct mlxsw_sp_mr_route_key key;
	u16 irif_index;				/* Ingress RIF */
	u16 min_mtu;
	void *priv;	/* Flavor-specific state (ops->route_priv_size) */
};
/* Build and commit a flexible action block for a multicast route: bind the
 * pre-allocated flow counter, then append a trap, forward (mcrouter) or
 * trap-and-forward action according to @route_action. For the forwarding
 * actions the eRIF list is committed to hardware first, because the
 * mcrouter action references the list's head KVDL index.
 * Returns the committed block, or an ERR_PTR() on failure.
 */
static struct mlxsw_afa_block *
mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_sp_mr_route_action route_action,
				  u16 irif_index, u32 counter_index,
				  u16 min_mtu,
				  struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_afa_block *afa_block;
	int err;

	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
	if (IS_ERR(afa_block))
		return afa_block;

	err = mlxsw_afa_block_append_allocated_counter(afa_block,
						       counter_index);
	if (err)
		goto err;

	switch (route_action) {
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
		err = mlxsw_afa_block_append_trap(afa_block,
						  MLXSW_TRAP_ID_ACL1);
		if (err)
			goto err;
		break;
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
	case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
		/* If we are about to append a multicast router action, commit
		 * the erif_list.
		 */
		err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
		if (err)
			goto err;
		err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
						      min_mtu, false,
						      erif_list->kvdl_index);
		if (err)
			goto err;
		if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
			err = mlxsw_afa_block_append_trap_and_forward(afa_block,
								      MLXSW_TRAP_ID_ACL2);
			if (err)
				goto err;
		}
		break;
	default:
		err = -EINVAL;
		goto err;
	}

	err = mlxsw_afa_block_commit(afa_block);
	if (err)
		goto err;
	return afa_block;
err:
	mlxsw_afa_block_destroy(afa_block);
	return ERR_PTR(err);
}
/* Symmetric counterpart of mlxsw_sp_mr_tcam_afa_block_create(). */
static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
	mlxsw_afa_block_destroy(afa_block);
}
  224. static int
  225. mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
  226. struct mlxsw_sp_mr_tcam_erif_list *erif_list,
  227. struct mlxsw_sp_mr_route_info *route_info)
  228. {
  229. int err;
  230. int i;
  231. for (i = 0; i < route_info->erif_num; i++) {
  232. u16 erif_index = route_info->erif_indices[i];
  233. err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
  234. erif_index);
  235. if (err)
  236. return err;
  237. }
  238. return 0;
  239. }
/* Create a multicast route: cache its attributes, populate the eRIF list,
 * allocate a flow counter, build the action block and finally write the
 * TCAM entry through the flavor-specific ops. On failure everything is
 * unwound in reverse order. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
			      void *route_priv,
			      struct mlxsw_sp_mr_route_params *route_params)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	int err;

	route->key = route_params->key;
	route->irif_index = route_params->value.irif_index;
	route->min_mtu = route_params->value.min_mtu;
	route->action = route_params->value.route_action;

	/* Create the egress RIFs list */
	mlxsw_sp_mr_erif_list_init(&route->erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
					     &route_params->value);
	if (err)
		goto err_erif_populate;

	/* Create the flow counter */
	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
	if (err)
		goto err_counter_alloc;

	/* Create the flexible action block */
	route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
							     route->action,
							     route->irif_index,
							     route->counter_index,
							     route->min_mtu,
							     &route->erif_list);
	if (IS_ERR(route->afa_block)) {
		err = PTR_ERR(route->afa_block);
		goto err_afa_block_create;
	}

	route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
	if (!route->priv) {
		err = -ENOMEM;
		goto err_route_priv_alloc;
	}

	/* Write the route to the TCAM */
	err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
				&route->key, route->afa_block,
				route_params->prio);
	if (err)
		goto err_route_create;
	return 0;

/* Unwind in reverse order of acquisition; the erif_populate/counter_alloc
 * failure paths both only need the eRIF list flushed.
 */
err_route_create:
	kfree(route->priv);
err_route_priv_alloc:
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
err_erif_populate:
err_counter_alloc:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	return err;
}
/* Destroy a route in reverse order of creation: remove the TCAM entry
 * first so hardware no longer references the action block, counter or
 * eRIF list before those are freed.
 */
static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
					   void *priv, void *route_priv)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
	kfree(route->priv);
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
}
  309. static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
  310. void *route_priv, u64 *packets,
  311. u64 *bytes)
  312. {
  313. struct mlxsw_sp_mr_tcam_route *route = route_priv;
  314. return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
  315. packets, bytes);
  316. }
/* Change a route's action: build a new action block for @route_action,
 * repoint the TCAM entry at it, and only then free the old block so the
 * route stays valid throughout. On failure the route is left untouched.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
				     void *route_priv,
				     enum mlxsw_sp_mr_route_action route_action)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->action = route_action;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}
/* Change a route's minimum MTU. The MTU is baked into the action block,
 * so a new block is built with @min_mtu, the TCAM entry is repointed at
 * it, and the old block is freed. On failure the route is left untouched.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
						 void *route_priv, u16 min_mtu)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route->action,
						      route->irif_index,
						      route->counter_index,
						      min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->min_mtu = min_mtu;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}
/* Update the cached ingress RIF of a TRAP route. Forwarding routes encode
 * the iRIF inside their action block (see the mcrouter action), so they
 * must go through a full action update instead; reject them here.
 */
static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
					      void *route_priv, u16 irif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return -EINVAL;
	route->irif_index = irif_index;
	return 0;
}
  385. static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
  386. void *route_priv, u16 erif_index)
  387. {
  388. struct mlxsw_sp_mr_tcam_route *route = route_priv;
  389. int err;
  390. err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
  391. erif_index);
  392. if (err)
  393. return err;
  394. /* Commit the action only if the route action is not TRAP */
  395. if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
  396. return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
  397. &route->erif_list);
  398. return 0;
  399. }
/* Remove one eRIF from a route. Sublists are append-only, so the whole
 * eRIF list is rebuilt without the deleted entry, a new action block is
 * pointed at the copy, the TCAM entry is updated, and only then are the
 * old block and old list released. Returns 0 or a negative errno.
 */
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;
	int i;

	/* Create a copy of the original erif_list without the deleted entry */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) {
		for (i = 0; i < erif_sublist->num_erifs; i++) {
			u16 curr_erif = erif_sublist->erif_indices[i];

			if (curr_erif == erif_index)
				continue;
			err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
							curr_erif);
			if (err)
				goto err_erif_list_add;
		}
	}

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err_route_write;

	/* Hardware now uses the new block/list; release the old state and
	 * adopt the copy.
	 */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_list_add:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}
/* Re-program a route from @route_info: build a fresh eRIF list and action
 * block, update the TCAM entry, then release the old state and cache the
 * new attributes. On failure the old route remains active.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new erif_list */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
	if (err)
		goto err_erif_populate;

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route_info->route_action,
						      route_info->irif_index,
						      route->counter_index,
						      route_info->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err_route_write;

	/* Hardware now uses the new block/list; release the old state and
	 * adopt the new attributes.
	 */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	route->action = route_info->route_action;
	route->irif_index = route_info->irif_index;
	route->min_mtu = route_info->min_mtu;
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_populate:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}
  493. static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
  494. {
  495. const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
  496. struct mlxsw_sp_mr_tcam *mr_tcam = priv;
  497. int err;
  498. if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
  499. return -EIO;
  500. mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
  501. if (!mr_tcam->priv)
  502. return -ENOMEM;
  503. err = ops->init(mlxsw_sp, mr_tcam->priv);
  504. if (err)
  505. goto err_init;
  506. return 0;
  507. err_init:
  508. kfree(mr_tcam->priv);
  509. return err;
  510. }
  511. static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
  512. {
  513. const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
  514. struct mlxsw_sp_mr_tcam *mr_tcam = priv;
  515. ops->fini(mr_tcam->priv);
  516. kfree(mr_tcam->priv);
  517. }
/* Multicast routing backend implemented on top of the policy engine
 * (TCAM); registered with the common multicast routing core.
 */
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_mr_tcam),
	.route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
	.init = mlxsw_sp_mr_tcam_init,
	.route_create = mlxsw_sp_mr_tcam_route_create,
	.route_update = mlxsw_sp_mr_tcam_route_update,
	.route_stats = mlxsw_sp_mr_tcam_route_stats,
	.route_action_update = mlxsw_sp_mr_tcam_route_action_update,
	.route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
	.route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
	.route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
	.route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
	.route_destroy = mlxsw_sp_mr_tcam_route_destroy,
	.fini = mlxsw_sp_mr_tcam_fini,
};