/* net/dsa/switch.c (extraction artifacts — file-size banner and
 * line-number gutter from the original paste — removed)
 */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Handling of a single switch chip, part of a switch fabric
  4. *
  5. * Copyright (c) 2017 Savoir-faire Linux Inc.
  6. * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  7. */
  8. #include <linux/if_bridge.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/notifier.h>
  11. #include <linux/if_vlan.h>
  12. #include <net/switchdev.h>
  13. #include "dsa.h"
  14. #include "netlink.h"
  15. #include "port.h"
  16. #include "switch.h"
  17. #include "tag_8021q.h"
  18. #include "trace.h"
  19. #include "user.h"
  20. static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
  21. unsigned int ageing_time)
  22. {
  23. struct dsa_port *dp;
  24. dsa_switch_for_each_port(dp, ds)
  25. if (dp->ageing_time && dp->ageing_time < ageing_time)
  26. ageing_time = dp->ageing_time;
  27. return ageing_time;
  28. }
  29. static int dsa_switch_ageing_time(struct dsa_switch *ds,
  30. struct dsa_notifier_ageing_time_info *info)
  31. {
  32. unsigned int ageing_time = info->ageing_time;
  33. if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
  34. return -ERANGE;
  35. if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
  36. return -ERANGE;
  37. /* Program the fastest ageing time in case of multiple bridges */
  38. ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
  39. if (ds->ops->set_ageing_time)
  40. return ds->ops->set_ageing_time(ds, ageing_time);
  41. return 0;
  42. }
  43. static bool dsa_port_mtu_match(struct dsa_port *dp,
  44. struct dsa_notifier_mtu_info *info)
  45. {
  46. return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
  47. }
  48. static int dsa_switch_mtu(struct dsa_switch *ds,
  49. struct dsa_notifier_mtu_info *info)
  50. {
  51. struct dsa_port *dp;
  52. int ret;
  53. if (!ds->ops->port_change_mtu)
  54. return -EOPNOTSUPP;
  55. dsa_switch_for_each_port(dp, ds) {
  56. if (dsa_port_mtu_match(dp, info)) {
  57. ret = ds->ops->port_change_mtu(ds, dp->index,
  58. info->mtu);
  59. if (ret)
  60. return ret;
  61. }
  62. }
  63. return 0;
  64. }
/* Notify @ds that a port (possibly on another switch of the tree) has
 * joined a bridge. The local case requires driver support; the
 * cross-chip case is optional.
 */
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		/* The joining port belongs to this switch */
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		/* Foreign switch: give the driver a chance to adjust
		 * cross-chip forwarding towards the bridged port.
		 */
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}
/* Notify @ds that a port has left a bridge. Both the local and the
 * cross-chip callbacks are best-effort (void), so this cannot fail.
 */
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
						info->dp->ds->index,
						info->dp->index,
						info->bridge);

	return 0;
}
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	/* Only switches on the path towards the CPU can match; on such a
	 * switch, the matching port is the one routing towards the CPU.
	 */
	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}
  116. static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
  117. const unsigned char *addr, u16 vid,
  118. struct dsa_db db)
  119. {
  120. struct dsa_mac_addr *a;
  121. list_for_each_entry(a, addr_list, list)
  122. if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
  123. dsa_db_equal(&a->db, &db))
  124. return a;
  125. return NULL;
  126. }
/* Install an MDB entry on @dp. Shared (CPU/DSA) ports refcount entries,
 * since several user ports may request the same address; hardware is
 * only touched on the first addition.
 */
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_mdb_add(ds, port, mdb, db);
		trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		/* Already in hardware, just bump the reference count */
		refcount_inc(&a->refcount);
		trace_dsa_mdb_add_bump(dp, mdb->addr, mdb->vid, &db,
				       &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
/* Remove an MDB entry from @dp, dropping one reference on shared ports
 * and only touching hardware once the last reference goes away.
 */
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_mdb_del(ds, port, mdb, db);
		trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		trace_dsa_mdb_del_not_found(dp, mdb->addr, mdb->vid, &db);
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount)) {
		/* Still referenced elsewhere; keep the hardware entry */
		trace_dsa_mdb_del_drop(dp, mdb->addr, mdb->vid, &db,
				       &a->refcount);
		goto out;
	}

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
	if (err) {
		/* Hardware removal failed: restore the dropped reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
/* Install an FDB entry on @dp. Shared (CPU/DSA) ports refcount entries;
 * hardware is only programmed on the first addition.
 */
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
		trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		/* Already in hardware, just bump the reference count */
		refcount_inc(&a->refcount);
		trace_dsa_fdb_add_bump(dp, addr, vid, &db, &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
/* Remove an FDB entry from @dp, dropping one reference on shared ports
 * and only touching hardware once the last reference goes away.
 */
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
		trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		trace_dsa_fdb_del_not_found(dp, addr, vid, &db);
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount)) {
		/* Still referenced elsewhere; keep the hardware entry */
		trace_dsa_fdb_del_drop(dp, addr, vid, &db, &a->refcount);
		goto out;
	}

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
	if (err) {
		/* Hardware removal failed: restore the dropped reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
/* Install a refcounted FDB entry on a LAG. The entry list and lock live
 * on the LAG itself since it spans multiple ports.
 */
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		/* Already in hardware, just bump the reference count */
		refcount_inc(&a->refcount);
		trace_dsa_lag_fdb_add_bump(lag->dev, addr, vid, &db,
					   &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	trace_dsa_lag_fdb_add_hw(lag->dev, addr, vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
/* Drop one reference on a LAG FDB entry, removing it from hardware only
 * when the last reference goes away.
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		trace_dsa_lag_fdb_del_not_found(lag->dev, addr, vid, &db);
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount)) {
		/* Still referenced elsewhere; keep the hardware entry */
		trace_dsa_lag_fdb_del_drop(lag->dev, addr, vid, &db,
					   &a->refcount);
		goto out;
	}

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	trace_dsa_lag_fdb_del_hw(lag->dev, addr, vid, &db, err);
	if (err) {
		/* Hardware removal failed: restore the dropped reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
/* Install @info's address as a host FDB entry on every port of @ds that
 * lies between the targeted port and its dedicated CPU port.
 */
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				/* CPU port is under a LAG: program the
				 * LAG rather than the individual port.
				 */
				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_add(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}
/* Remove @info's address as a host FDB entry from every port of @ds that
 * lies between the targeted port and its dedicated CPU port.
 */
static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				/* CPU port is under a LAG: operate on the
				 * LAG rather than the individual port.
				 */
				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_del(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}
  395. static int dsa_switch_fdb_add(struct dsa_switch *ds,
  396. struct dsa_notifier_fdb_info *info)
  397. {
  398. int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
  399. struct dsa_port *dp = dsa_to_port(ds, port);
  400. if (!ds->ops->port_fdb_add)
  401. return -EOPNOTSUPP;
  402. return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
  403. }
  404. static int dsa_switch_fdb_del(struct dsa_switch *ds,
  405. struct dsa_notifier_fdb_info *info)
  406. {
  407. int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
  408. struct dsa_port *dp = dsa_to_port(ds, port);
  409. if (!ds->ops->port_fdb_del)
  410. return -EOPNOTSUPP;
  411. return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
  412. }
  413. static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
  414. struct dsa_notifier_lag_fdb_info *info)
  415. {
  416. struct dsa_port *dp;
  417. if (!ds->ops->lag_fdb_add)
  418. return -EOPNOTSUPP;
  419. /* Notify switch only if it has a port in this LAG */
  420. dsa_switch_for_each_port(dp, ds)
  421. if (dsa_port_offloads_lag(dp, info->lag))
  422. return dsa_switch_do_lag_fdb_add(ds, info->lag,
  423. info->addr, info->vid,
  424. info->db);
  425. return 0;
  426. }
  427. static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
  428. struct dsa_notifier_lag_fdb_info *info)
  429. {
  430. struct dsa_port *dp;
  431. if (!ds->ops->lag_fdb_del)
  432. return -EOPNOTSUPP;
  433. /* Notify switch only if it has a port in this LAG */
  434. dsa_switch_for_each_port(dp, ds)
  435. if (dsa_port_offloads_lag(dp, info->lag))
  436. return dsa_switch_do_lag_fdb_del(ds, info->lag,
  437. info->addr, info->vid,
  438. info->db);
  439. return 0;
  440. }
  441. static int dsa_switch_lag_change(struct dsa_switch *ds,
  442. struct dsa_notifier_lag_info *info)
  443. {
  444. if (info->dp->ds == ds && ds->ops->port_lag_change)
  445. return ds->ops->port_lag_change(ds, info->dp->index);
  446. if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
  447. return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
  448. info->dp->index);
  449. return 0;
  450. }
  451. static int dsa_switch_lag_join(struct dsa_switch *ds,
  452. struct dsa_notifier_lag_info *info)
  453. {
  454. if (info->dp->ds == ds && ds->ops->port_lag_join)
  455. return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
  456. info->info, info->extack);
  457. if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
  458. return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
  459. info->dp->index, info->lag,
  460. info->info, info->extack);
  461. return -EOPNOTSUPP;
  462. }
  463. static int dsa_switch_lag_leave(struct dsa_switch *ds,
  464. struct dsa_notifier_lag_info *info)
  465. {
  466. if (info->dp->ds == ds && ds->ops->port_lag_leave)
  467. return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
  468. if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
  469. return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
  470. info->dp->index, info->lag);
  471. return -EOPNOTSUPP;
  472. }
  473. static int dsa_switch_mdb_add(struct dsa_switch *ds,
  474. struct dsa_notifier_mdb_info *info)
  475. {
  476. int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
  477. struct dsa_port *dp = dsa_to_port(ds, port);
  478. if (!ds->ops->port_mdb_add)
  479. return -EOPNOTSUPP;
  480. return dsa_port_do_mdb_add(dp, info->mdb, info->db);
  481. }
  482. static int dsa_switch_mdb_del(struct dsa_switch *ds,
  483. struct dsa_notifier_mdb_info *info)
  484. {
  485. int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
  486. struct dsa_port *dp = dsa_to_port(ds, port);
  487. if (!ds->ops->port_mdb_del)
  488. return -EOPNOTSUPP;
  489. return dsa_port_do_mdb_del(dp, info->mdb, info->db);
  490. }
  491. static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
  492. struct dsa_notifier_mdb_info *info)
  493. {
  494. struct dsa_port *dp;
  495. int err = 0;
  496. if (!ds->ops->port_mdb_add)
  497. return -EOPNOTSUPP;
  498. dsa_switch_for_each_port(dp, ds) {
  499. if (dsa_port_host_address_match(dp, info->dp)) {
  500. err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
  501. if (err)
  502. break;
  503. }
  504. }
  505. return err;
  506. }
  507. static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
  508. struct dsa_notifier_mdb_info *info)
  509. {
  510. struct dsa_port *dp;
  511. int err = 0;
  512. if (!ds->ops->port_mdb_del)
  513. return -EOPNOTSUPP;
  514. dsa_switch_for_each_port(dp, ds) {
  515. if (dsa_port_host_address_match(dp, info->dp)) {
  516. err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
  517. if (err)
  518. break;
  519. }
  520. }
  521. return err;
  522. }
  523. /* Port VLANs match on the targeted port and on all DSA ports */
  524. static bool dsa_port_vlan_match(struct dsa_port *dp,
  525. struct dsa_notifier_vlan_info *info)
  526. {
  527. return dsa_port_is_dsa(dp) || dp == info->dp;
  528. }
/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	/* Only switches on the path towards the CPU participate */
	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}
  540. struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
  541. const struct switchdev_obj_port_vlan *vlan)
  542. {
  543. struct dsa_vlan *v;
  544. list_for_each_entry(v, vlan_list, list)
  545. if (v->vid == vlan->vid)
  546. return v;
  547. return NULL;
  548. }
/* Add a VLAN to @dp. Shared (CPU/DSA) ports refcount VLANs, since
 * several user ports may bridge into the same VLAN.
 */
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_vlan_add(ds, port, vlan, extack);
		trace_dsa_vlan_add_hw(dp, vlan, err);

		return err;
	}

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		/* Already in hardware, just bump the reference count */
		refcount_inc(&v->refcount);
		trace_dsa_vlan_add_bump(dp, vlan, &v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	trace_dsa_vlan_add_hw(dp, vlan, err);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
/* Delete a VLAN from @dp, dropping one reference on shared ports and
 * only touching hardware once the last reference goes away.
 */
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_vlan_del(ds, port, vlan);
		trace_dsa_vlan_del_hw(dp, vlan, err);

		return err;
	}

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		trace_dsa_vlan_del_not_found(dp, vlan);
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount)) {
		/* Still referenced elsewhere; keep the hardware entry */
		trace_dsa_vlan_del_drop(dp, vlan, &v->refcount);
		goto out;
	}

	err = ds->ops->port_vlan_del(ds, port, vlan);
	trace_dsa_vlan_del_hw(dp, vlan, err);
	if (err) {
		/* Hardware removal failed: restore the dropped reference */
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
  631. static int dsa_switch_vlan_add(struct dsa_switch *ds,
  632. struct dsa_notifier_vlan_info *info)
  633. {
  634. struct dsa_port *dp;
  635. int err;
  636. if (!ds->ops->port_vlan_add)
  637. return -EOPNOTSUPP;
  638. dsa_switch_for_each_port(dp, ds) {
  639. if (dsa_port_vlan_match(dp, info)) {
  640. err = dsa_port_do_vlan_add(dp, info->vlan,
  641. info->extack);
  642. if (err)
  643. return err;
  644. }
  645. }
  646. return 0;
  647. }
  648. static int dsa_switch_vlan_del(struct dsa_switch *ds,
  649. struct dsa_notifier_vlan_info *info)
  650. {
  651. struct dsa_port *dp;
  652. int err;
  653. if (!ds->ops->port_vlan_del)
  654. return -EOPNOTSUPP;
  655. dsa_switch_for_each_port(dp, ds) {
  656. if (dsa_port_vlan_match(dp, info)) {
  657. err = dsa_port_do_vlan_del(dp, info->vlan);
  658. if (err)
  659. return err;
  660. }
  661. }
  662. return 0;
  663. }
  664. static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
  665. struct dsa_notifier_vlan_info *info)
  666. {
  667. struct dsa_port *dp;
  668. int err;
  669. if (!ds->ops->port_vlan_add)
  670. return -EOPNOTSUPP;
  671. dsa_switch_for_each_port(dp, ds) {
  672. if (dsa_port_host_vlan_match(dp, info->dp)) {
  673. err = dsa_port_do_vlan_add(dp, info->vlan,
  674. info->extack);
  675. if (err)
  676. return err;
  677. }
  678. }
  679. return 0;
  680. }
  681. static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
  682. struct dsa_notifier_vlan_info *info)
  683. {
  684. struct dsa_port *dp;
  685. int err;
  686. if (!ds->ops->port_vlan_del)
  687. return -EOPNOTSUPP;
  688. dsa_switch_for_each_port(dp, ds) {
  689. if (dsa_port_host_vlan_match(dp, info->dp)) {
  690. err = dsa_port_do_vlan_del(dp, info->vlan);
  691. if (err)
  692. return err;
  693. }
  694. }
  695. return 0;
  696. }
/* Switch the whole chip over to a new tagging protocol: first the
 * hardware (which may fail), then the per-CPU-port tagger pointers and
 * the per-user-port state that depends on the tagger.
 */
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *user = dp->user;

		dsa_user_setup_tagger(user);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_user_change_mtu(user, user->mtu);
	}

	return 0;
}
/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
/* Counterpart of dsa_switch_connect_tag_proto(): only the tagger side
 * needs to be told; the switch side keeps no per-connection resources.
 */
static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}
  769. static int
  770. dsa_switch_conduit_state_change(struct dsa_switch *ds,
  771. struct dsa_notifier_conduit_state_info *info)
  772. {
  773. if (!ds->ops->conduit_state_change)
  774. return 0;
  775. ds->ops->conduit_state_change(ds, info->conduit, info->operational);
  776. return 0;
  777. }
/* Notifier callback registered per switch: decode the DSA event and
 * dispatch it to the matching handler. A non-zero handler result stops
 * the notifier chain via notifier_from_errno().
 */
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_CONDUIT_STATE_CHANGE:
		err = dsa_switch_conduit_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 *
 * Return: 0 on success, or the negative errno extracted from the first
 * notifier that broke the chain.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}
/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 *
 * Return: 0 on success, or the first tree's error, which stops iteration.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}
/* Hook @ds into its tree's notifier chain so it receives DSA events. */
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
/* Remove @ds from its tree's notifier chain; failure is only logged
 * since callers run on teardown paths that cannot back out.
 */
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}