  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * DSA topology and switch handling
  4. *
  5. * Copyright (c) 2008-2009 Marvell Semiconductor
  6. * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
  7. * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
  8. */
  9. #include <linux/device.h>
  10. #include <linux/err.h>
  11. #include <linux/list.h>
  12. #include <linux/module.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/slab.h>
  15. #include <linux/rtnetlink.h>
  16. #include <linux/of.h>
  17. #include <linux/of_net.h>
  18. #include <net/dsa_stubs.h>
  19. #include <net/sch_generic.h>
  20. #include "conduit.h"
  21. #include "devlink.h"
  22. #include "dsa.h"
  23. #include "netlink.h"
  24. #include "port.h"
  25. #include "switch.h"
  26. #include "tag.h"
  27. #include "user.h"
  28. #define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG
  29. static DEFINE_MUTEX(dsa2_mutex);
  30. LIST_HEAD(dsa_tree_list);
  31. static struct workqueue_struct *dsa_owq;
  32. /* Track the bridges with forwarding offload enabled */
  33. static unsigned long dsa_fwd_offloading_bridges;
/* Queue @work on the ordered DSA workqueue (dsa_owq). Returns what
 * queue_work() returns: %false if @work was already pending, %true otherwise.
 */
bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}
/* Wait for every work item currently queued on the ordered DSA workqueue
 * (e.g. deferred switchdev operations scheduled via dsa_schedule_work())
 * to finish executing.
 */
void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
  43. /**
  44. * dsa_lag_map() - Map LAG structure to a linear LAG array
  45. * @dst: Tree in which to record the mapping.
  46. * @lag: LAG structure that is to be mapped to the tree's array.
  47. *
  48. * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
  49. * two spaces. The size of the mapping space is determined by the
  50. * driver by setting ds->num_lag_ids. It is perfectly legal to leave
  51. * it unset if it is not needed, in which case these functions become
  52. * no-ops.
  53. */
  54. void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
  55. {
  56. unsigned int id;
  57. for (id = 1; id <= dst->lags_len; id++) {
  58. if (!dsa_lag_by_id(dst, id)) {
  59. dst->lags[id - 1] = lag;
  60. lag->id = id;
  61. return;
  62. }
  63. }
  64. /* No IDs left, which is OK. Some drivers do not need it. The
  65. * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
  66. * returns an error for this device when joining the LAG. The
  67. * driver can then return -EOPNOTSUPP back to DSA, which will
  68. * fall back to a software LAG.
  69. */
  70. }
  71. /**
  72. * dsa_lag_unmap() - Remove a LAG ID mapping
  73. * @dst: Tree in which the mapping is recorded.
  74. * @lag: LAG structure that was mapped.
  75. *
  76. * As there may be multiple users of the mapping, it is only removed
  77. * if there are no other references to it.
  78. */
  79. void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
  80. {
  81. unsigned int id;
  82. dsa_lags_foreach_id(id, dst) {
  83. if (dsa_lag_by_id(dst, id) == lag) {
  84. dst->lags[id - 1] = NULL;
  85. lag->id = 0;
  86. break;
  87. }
  88. }
  89. }
  90. struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
  91. const struct net_device *lag_dev)
  92. {
  93. struct dsa_port *dp;
  94. list_for_each_entry(dp, &dst->ports, list)
  95. if (dsa_port_lag_dev_get(dp) == lag_dev)
  96. return dp->lag;
  97. return NULL;
  98. }
  99. struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
  100. const struct net_device *br)
  101. {
  102. struct dsa_port *dp;
  103. list_for_each_entry(dp, &dst->ports, list)
  104. if (dsa_port_bridge_dev_get(dp) == br)
  105. return dp->bridge;
  106. return NULL;
  107. }
  108. static int dsa_bridge_num_find(const struct net_device *bridge_dev)
  109. {
  110. struct dsa_switch_tree *dst;
  111. list_for_each_entry(dst, &dsa_tree_list, list) {
  112. struct dsa_bridge *bridge;
  113. bridge = dsa_tree_bridge_find(dst, bridge_dev);
  114. if (bridge)
  115. return bridge->num;
  116. }
  117. return 0;
  118. }
/* Allocate (or look up) the global forwarding-offload number for
 * @bridge_dev, used for TX forwarding offload / FDB isolation. Returns 0
 * when @max is 0 (driver has no FDB isolation support) or when the
 * [1, @max) ID space is exhausted; otherwise the (possibly pre-existing)
 * non-zero bridge number.
 */
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		/* Bit 0 is skipped: bridge number 0 means "not offloaded" */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}
/* Release @bridge_num, previously obtained via dsa_bridge_num_get() for
 * @bridge_dev, back to the global allocation bitmap.
 */
void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
  149. struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
  150. {
  151. struct dsa_switch_tree *dst;
  152. struct dsa_port *dp;
  153. list_for_each_entry(dst, &dsa_tree_list, list) {
  154. if (dst->index != tree_index)
  155. continue;
  156. list_for_each_entry(dp, &dst->ports, list) {
  157. if (dp->ds->index != sw_index)
  158. continue;
  159. return dp->ds;
  160. }
  161. }
  162. return NULL;
  163. }
  164. EXPORT_SYMBOL_GPL(dsa_switch_find);
  165. static struct dsa_switch_tree *dsa_tree_find(int index)
  166. {
  167. struct dsa_switch_tree *dst;
  168. list_for_each_entry(dst, &dsa_tree_list, list)
  169. if (dst->index == index)
  170. return dst;
  171. return NULL;
  172. }
/* Allocate a new switch tree with index @index, initialize its routing
 * table and port lists, link it into the global tree list and give the
 * caller the initial reference. Returns NULL on allocation failure.
 */
static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	/* Caller holds the first reference; freed via dsa_tree_put() */
	kref_init(&dst->refcount);

	return dst;
}
/* Final destruction of a tree: drop the tagger reference (if any was
 * taken), unlink from the global tree list and free the memory. Called
 * only from the kref release path.
 */
static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}
  194. static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
  195. {
  196. if (dst)
  197. kref_get(&dst->refcount);
  198. return dst;
  199. }
  200. static struct dsa_switch_tree *dsa_tree_touch(int index)
  201. {
  202. struct dsa_switch_tree *dst;
  203. dst = dsa_tree_find(index);
  204. if (dst)
  205. return dsa_tree_get(dst);
  206. else
  207. return dsa_tree_alloc(index);
  208. }
/* kref release callback: recover the tree from its embedded refcount and
 * destroy it. Invoked by kref_put() when the last reference drops.
 */
static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}
  215. static void dsa_tree_put(struct dsa_switch_tree *dst)
  216. {
  217. if (dst)
  218. kref_put(&dst->refcount, dsa_tree_release);
  219. }
  220. static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
  221. struct device_node *dn)
  222. {
  223. struct dsa_port *dp;
  224. list_for_each_entry(dp, &dst->ports, list)
  225. if (dp->dn == dn)
  226. return dp;
  227. return NULL;
  228. }
/* Find the routing-table entry describing the link from @dp towards
 * @link_dp, creating and registering it if it does not exist yet.
 * Returns NULL on allocation failure.
 */
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	/* Reuse an existing entry for this exact (dp, link_dp) pair */
	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}
/* Populate the tree routing table from the "link" phandles of DSA port
 * @dp's OF node. Returns false if a linked port has not been discovered
 * yet (the table is incomplete and setup must be retried later) or if a
 * routing entry could not be allocated; true when all links are resolved.
 */
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			/* Early exit from the iterator: drop the node ref
			 * the iterator took for us
			 */
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
/* Build the routing table for every DSA (inter-switch) port in the tree.
 * Returns false as soon as one port's links cannot be fully resolved,
 * meaning not all member switches have probed yet.
 */
static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}
  284. static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
  285. {
  286. struct dsa_port *dp;
  287. list_for_each_entry(dp, &dst->ports, list)
  288. if (dsa_port_is_cpu(dp))
  289. return dp;
  290. return NULL;
  291. }
  292. struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst)
  293. {
  294. struct device_node *ethernet;
  295. struct net_device *conduit;
  296. struct dsa_port *cpu_dp;
  297. cpu_dp = dsa_tree_find_first_cpu(dst);
  298. ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
  299. conduit = of_find_net_device_by_node(ethernet);
  300. of_node_put(ethernet);
  301. return conduit;
  302. }
/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		/* Keep any local CPU port assignment made earlier */
		if (dp->cpu_dp)
			continue;

		/* Only user and DSA ports are conducted through a CPU port */
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}
/* Ask the driver which local CPU port it prefers as the default for @ds.
 * Returns NULL when the driver has no preference (or does not implement
 * the hook), and warns + returns NULL if the driver hands back a port
 * that is not a CPU port of this very switch.
 */
static struct dsa_port *
dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
	struct dsa_port *cpu_dp;

	if (!ds->ops->preferred_default_local_cpu_port)
		return NULL;

	cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
	if (!cpu_dp)
		return NULL;

	/* Sanity-check the driver's answer before trusting it */
	if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
		return NULL;

	return cpu_dp;
}
/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* If the driver prefers a specific local CPU port, skip all
		 * other CPU ports of that switch
		 */
		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
		if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	/* Ports on switches without a local CPU port fall back to the
	 * tree's first CPU port
	 */
	return dsa_tree_setup_default_cpu(dst);
}
  360. static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
  361. {
  362. struct dsa_port *dp;
  363. list_for_each_entry(dp, &dst->ports, list)
  364. if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
  365. dp->cpu_dp = NULL;
  366. }
/* Bring up a single port according to its type: disable unused ports,
 * register the link and enable shared (CPU/DSA) ports, or create the user
 * netdevice for user ports. Tracks which stages completed so that a
 * failure rolls back exactly what was done. Idempotent via dp->setup.
 */
static int dsa_port_setup(struct dsa_port *dp)
{
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			/* No OF node: port still works, but without
			 * phylink-managed link handling
			 */
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_user_create(dp);
		break;
	}

	/* Unwind only the stages that actually completed, in reverse order */
	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}
/* Reverse of dsa_port_setup(): disable shared ports and unregister their
 * link, or destroy the user netdevice, then tear down the port's devlink
 * object. No-op when the port was never set up.
 */
static void dsa_port_teardown(struct dsa_port *dp)
{
	if (!dp->setup)
		return;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->user) {
			dsa_user_destroy(dp->user);
			dp->user = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}
/* Demote a port that failed regular setup to the UNUSED type and set it
 * up as such (which just disables it in hardware).
 */
static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}
/* Make switch @ds use the tree's tagging protocol: ask the driver to
 * switch protocols if the tree's tagger differs from the default, then
 * run the tagger-side and driver-side connect hooks. On a failed
 * driver-side connect, the tagger-side connection is rolled back.
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	/* No protocol change needed when the tree already runs the
	 * hardware's default protocol
	 */
	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	/* Optional tagger-side per-switch state */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Optional driver-side hookup to the tagger */
	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
/* Disconnect switch @ds from the tree's tagger, releasing any per-switch
 * tagger state established by tag_ops->connect().
 */
static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);
}
/* One-time initialization of switch @ds: devlink allocation, notifier
 * registration, driver ->setup(), tag protocol hookup and (optionally) a
 * driver-provided user MDIO bus. Uses a goto-cleanup ladder so each failed
 * stage unwinds everything before it. Idempotent via ds->setup.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the user MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the user MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	err = dsa_switch_devlink_alloc(ds);
	if (err)
		return err;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	/* Provide an MDIO bus for PHYs reached through the switch's own
	 * ->phy_read()/->phy_write() accessors, unless the driver already
	 * allocated one
	 */
	if (!ds->user_mii_bus && ds->ops->phy_read) {
		ds->user_mii_bus = mdiobus_alloc();
		if (!ds->user_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_user_mii_bus_init(ds);

		err = mdiobus_register(ds->user_mii_bus);
		if (err < 0)
			goto free_user_mii_bus;
	}

	dsa_switch_devlink_register(ds);

	ds->setup = true;
	return 0;

free_user_mii_bus:
	if (ds->user_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->user_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	dsa_switch_devlink_free(ds);
	return err;
}
/* Reverse of dsa_switch_setup(), in strict reverse order: devlink
 * unregistration, user MDIO bus, tag protocol, driver ->teardown(),
 * notifier, devlink free. No-op when the switch was never set up.
 */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	dsa_switch_devlink_unregister(ds);

	/* Only tear down the MDIO bus that dsa_switch_setup() created */
	if (ds->user_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->user_mii_bus);
		mdiobus_free(ds->user_mii_bus);
		ds->user_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	dsa_switch_devlink_free(ds);

	ds->setup = false;
}
/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	/* Let deferred switchdev work for user ports drain before touching
	 * the CPU/DSA ports they reference
	 */
	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}
/* Tear down every switch in the tree. dsa_switch_teardown() is a no-op
 * for switches that were never (or already) torn down, so visiting a
 * switch once per port is harmless.
 */
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}
/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	/* CPU and DSA ports must exist before user ports can be conducted
	 * through them
	 */
	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				/* A user port that fails setup is demoted to
				 * unused instead of failing the whole tree
				 */
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}
/* Set up every switch in the tree; on the first failure, tear down all
 * switches already brought up and return the error.
 */
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}
/* Attach each CPU port to its conduit netdevice under RTNL, then replay
 * the conduit's current admin and operational state to the switches so
 * they observe events that happened before the attachment.
 */
static int dsa_tree_setup_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *conduit = cpu_dp->conduit;
		/* Snapshot admin state before setup so the replay below is
		 * consistent; a noop TX qdisc means "not usable" even if
		 * IFF_UP is set
		 */
		bool admin_up = (conduit->flags & IFF_UP) &&
				!qdisc_tx_is_noop(conduit);

		err = dsa_conduit_setup(conduit, cpu_dp);
		if (err)
			break;

		/* Replay conduit state event */
		dsa_tree_conduit_admin_state_change(dst, conduit, admin_up);
		dsa_tree_conduit_oper_state_change(dst, conduit,
						   netif_oper_up(conduit));
	}

	rtnl_unlock();

	return err;
}
/* Detach each CPU port from its conduit netdevice under RTNL, first
 * telling the switches the conduit went administratively down.
 */
static void dsa_tree_teardown_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *conduit = cpu_dp->conduit;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the conduit is
		 * currently up and running.
		 */
		dsa_tree_conduit_admin_state_change(dst, conduit, false);

		dsa_conduit_teardown(conduit);
	}

	rtnl_unlock();
}
/* Allocate the tree's LAG mapping array, sized to the largest
 * ds->num_lag_ids among the member switches. A tree whose switches
 * declare no LAG IDs gets no array (dsa_lag_map() then becomes a no-op).
 * Returns -ENOMEM on allocation failure.
 */
static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}
/* Free the tree's LAG mapping array (kfree(NULL) is a safe no-op). */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}
/* Free every routing-table entry of the tree. Uses the _safe iterator
 * because each entry is deleted and freed while walking the list.
 */
static void dsa_tree_teardown_routing_table(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}
}
/* Bring up an entire switch tree once all member switches have probed:
 * routing table, CPU port assignment, switches, ports, conduit attachment
 * and LAG array, unwinding in reverse on any failure. Returns 0 both on
 * success and when the routing table is still incomplete (more member
 * switches are expected to probe later).
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		goto teardown_rtable;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_conduit(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_conduit;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_conduit:
	dsa_tree_teardown_conduit(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);
teardown_rtable:
	dsa_tree_teardown_routing_table(dst);

	return err;
}
/* Tear down an entire switch tree, in exactly the reverse order of
 * dsa_tree_setup(). No-op if the tree never finished setting up.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_conduit(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	dsa_tree_teardown_routing_table(dst);

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}
/* Switch the tree over to @tag_ops: connect every switch to the new
 * tagger and disconnect them from the old one. A connect failure other
 * than -EOPNOTSUPP rolls back to the old tagger.
 */
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	/* Undo the partial connection to the new tagger and restore the
	 * previous one
	 */
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}
/* Since the dsa/tagging sysfs device attribute is per conduit, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 *
 * Change the tree's tagging protocol from @old_tag_ops to @tag_ops.
 * Refused with -EBUSY while any conduit or user netdevice is up.
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	/* Avoid blocking on rtnl; let the syscall be restarted instead */
	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_conduit(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->user->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	/* Tell the switches to go back to the previous protocol */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
  815. static void dsa_tree_conduit_state_change(struct dsa_switch_tree *dst,
  816. struct net_device *conduit)
  817. {
  818. struct dsa_notifier_conduit_state_info info;
  819. struct dsa_port *cpu_dp = conduit->dsa_ptr;
  820. info.conduit = conduit;
  821. info.operational = dsa_port_conduit_is_operational(cpu_dp);
  822. dsa_tree_notify(dst, DSA_NOTIFIER_CONDUIT_STATE_CHANGE, &info);
  823. }
  824. void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst,
  825. struct net_device *conduit,
  826. bool up)
  827. {
  828. struct dsa_port *cpu_dp = conduit->dsa_ptr;
  829. bool notify = false;
  830. /* Don't keep track of admin state on LAG DSA conduits,
  831. * but rather just of physical DSA conduits
  832. */
  833. if (netif_is_lag_master(conduit))
  834. return;
  835. if ((dsa_port_conduit_is_operational(cpu_dp)) !=
  836. (up && cpu_dp->conduit_oper_up))
  837. notify = true;
  838. cpu_dp->conduit_admin_up = up;
  839. if (notify)
  840. dsa_tree_conduit_state_change(dst, conduit);
  841. }
  842. void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst,
  843. struct net_device *conduit,
  844. bool up)
  845. {
  846. struct dsa_port *cpu_dp = conduit->dsa_ptr;
  847. bool notify = false;
  848. /* Don't keep track of oper state on LAG DSA conduits,
  849. * but rather just of physical DSA conduits
  850. */
  851. if (netif_is_lag_master(conduit))
  852. return;
  853. if ((dsa_port_conduit_is_operational(cpu_dp)) !=
  854. (cpu_dp->conduit_admin_up && up))
  855. notify = true;
  856. cpu_dp->conduit_oper_up = up;
  857. if (notify)
  858. dsa_tree_conduit_state_change(dst, conduit);
  859. }
/* Return the dsa_port of @ds with the given @index, allocating one and
 * adding it to the tree's port list if it does not exist yet.
 * Returns NULL on allocation failure.
 */
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	/* Reuse the port if it was already created */
	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}
/* Mark @dp as a user port with the given label. @name is kept by
 * reference, not copied.
 */
static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}
/* Mark @dp as a DSA port (an inter-switch link) */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}
/* Query the tagging protocol preferred by @dp's driver. If @conduit is
 * itself a user port of another DSA switch (stacked setups), that
 * switch's tag protocol for its upstream port is passed down so the
 * driver can judge compatibility.
 */
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *conduit)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_user_dev_check(conduit)) {
		mdp = dsa_user_to_port(conduit);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the conduit device is not itself a DSA user in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
/* Set up @dp as a CPU port attached to @conduit. Resolves the tagging
 * protocol the tree will use: the driver's preference, optionally
 * overridden by @user_protocol (from DT), while enforcing that all
 * switches in the tree agree on a single protocol.
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, conduit);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_tag_driver_get_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			/* Fall back to the default rather than failing */
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get_by_id(default_proto);

	if (IS_ERR(tag_ops)) {
		/* -ENOPROTOOPT means the tagger module may show up later */
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->conduit = conduit;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
/* Parse a single port device tree node into @dp. A node with an
 * "ethernet" phandle becomes a CPU port, one with a "link" property a
 * DSA link port, anything else a user port labelled by "label".
 */
static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *conduit;
		const char *user_protocol;

		conduit = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		/* Conduit netdev not registered yet: retry probing later */
		if (!conduit)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, conduit, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
/* Walk the "ports" (or "ethernet-ports") container node of @dn and
 * parse every available child with a valid "reg" into the corresponding
 * dsa_port of @ds.
 */
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Early loop exit: drop the iterator's reference */
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}
/* Read the optional "dsa,member" property ([tree index, switch index])
 * and place @ds into the corresponding tree. Both indices default to 0
 * when the property is absent.
 */
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	/* Two switches may not share an index within one tree */
	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	/* Track the highest switch index seen in this tree */
	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
  1071. static int dsa_switch_touch_ports(struct dsa_switch *ds)
  1072. {
  1073. struct dsa_port *dp;
  1074. int port;
  1075. for (port = 0; port < ds->num_ports; port++) {
  1076. dp = dsa_port_touch(ds, port);
  1077. if (!dp)
  1078. return -ENOMEM;
  1079. }
  1080. return 0;
  1081. }
  1082. static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
  1083. {
  1084. int err;
  1085. err = dsa_switch_parse_member_of(ds, dn);
  1086. if (err)
  1087. return err;
  1088. err = dsa_switch_touch_ports(ds);
  1089. if (err)
  1090. return err;
  1091. return dsa_switch_parse_ports_of(ds, dn);
  1092. }
  1093. static int dev_is_class(struct device *dev, void *class)
  1094. {
  1095. if (dev->class != NULL && !strcmp(dev->class->name, class))
  1096. return 1;
  1097. return 0;
  1098. }
  1099. static struct device *dev_find_class(struct device *parent, char *class)
  1100. {
  1101. if (dev_is_class(parent, class)) {
  1102. get_device(parent);
  1103. return parent;
  1104. }
  1105. return device_find_child(parent, class, dev_is_class);
  1106. }
  1107. static struct net_device *dsa_dev_to_net_device(struct device *dev)
  1108. {
  1109. struct device *d;
  1110. d = dev_find_class(dev, "net");
  1111. if (d != NULL) {
  1112. struct net_device *nd;
  1113. nd = to_net_dev(d);
  1114. dev_hold(nd);
  1115. put_device(d);
  1116. return nd;
  1117. }
  1118. return NULL;
  1119. }
/* Parse one port from platform data, classifying it by @name: "cpu"
 * ports resolve @dev to their conduit netdev, "dsa" ports are
 * inter-switch links, anything else is a user port.
 */
static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *conduit;

		conduit = dsa_dev_to_net_device(dev);
		if (!conduit)
			return -EPROBE_DEFER;

		/* NOTE(review): the hold from dsa_dev_to_net_device() is
		 * dropped before the conduit is used below; presumably the
		 * platform data owner keeps the netdev alive — confirm.
		 */
		dev_put(conduit);

		return dsa_port_parse_cpu(dp, conduit, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
  1135. static int dsa_switch_parse_ports(struct dsa_switch *ds,
  1136. struct dsa_chip_data *cd)
  1137. {
  1138. bool valid_name_found = false;
  1139. struct dsa_port *dp;
  1140. struct device *dev;
  1141. const char *name;
  1142. unsigned int i;
  1143. int err;
  1144. for (i = 0; i < DSA_MAX_PORTS; i++) {
  1145. name = cd->port_names[i];
  1146. dev = cd->netdev[i];
  1147. dp = dsa_to_port(ds, i);
  1148. if (!name)
  1149. continue;
  1150. err = dsa_port_parse(dp, name, dev);
  1151. if (err)
  1152. return err;
  1153. valid_name_found = true;
  1154. }
  1155. if (!valid_name_found && i == DSA_MAX_PORTS)
  1156. return -EINVAL;
  1157. return 0;
  1158. }
  1159. static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
  1160. {
  1161. int err;
  1162. ds->cd = cd;
  1163. /* We don't support interconnected switches nor multiple trees via
  1164. * platform data, so this is the unique switch of the tree.
  1165. */
  1166. ds->index = 0;
  1167. ds->dst = dsa_tree_touch(0);
  1168. if (!ds->dst)
  1169. return -ENOMEM;
  1170. err = dsa_switch_touch_ports(ds);
  1171. if (err)
  1172. return err;
  1173. return dsa_switch_parse_ports(ds, cd);
  1174. }
/* Free every dsa_port of @ds, together with any FDB, MDB and VLAN
 * entries still attached to it.
 */
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_mac_addr *a, *tmp;
	struct dsa_port *dp, *next;
	struct dsa_vlan *v, *n;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		/* These are either entries that upper layers lost track of
		 * (probably due to bugs), or installed through interfaces
		 * where one does not necessarily have to remove them, like
		 * ndo_dflt_fdb_add().
		 */
		list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
			dev_info(ds->dev,
				 "Cleaning up unicast address %pM vid %u from port %d\n",
				 a->addr, a->vid, dp->index);
			list_del(&a->list);
			kfree(a);
		}

		list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
			dev_info(ds->dev,
				 "Cleaning up multicast address %pM vid %u from port %d\n",
				 a->addr, a->vid, dp->index);
			list_del(&a->list);
			kfree(a);
		}

		/* These are entries that upper layers have lost track of,
		 * probably due to bugs, but also due to dsa_port_do_vlan_del()
		 * having failed and the VLAN entry still lingering on.
		 */
		list_for_each_entry_safe(v, n, &dp->vlans, list) {
			dev_info(ds->dev,
				 "Cleaning up vid %u from port %d\n",
				 v->vid, dp->index);
			list_del(&v->list);
			kfree(v);
		}

		list_del(&dp->list);
		kfree(dp);
	}
}
/* Parse @ds from device tree or platform data, then set up the tree it
 * belongs to. May return -EPROBE_DEFER (via the parsers) while the
 * conduit netdev is not yet available.
 */
static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	/* A driver may provide either phylink_mac_ops or the individual
	 * phylink_mac_* callbacks, but not both.
	 */
	if (ds->phylink_mac_ops) {
		if (ds->ops->phylink_mac_select_pcs ||
		    ds->ops->phylink_mac_config ||
		    ds->ops->phylink_mac_link_down ||
		    ds->ops->phylink_mac_link_up)
			return -EINVAL;
	}

	/* On parse failure, ports already touched must be released here */
	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}
/* Register @ds with the DSA core under dsa2_mutex. */
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	/* Drops a tree reference; presumably pairs with the one obtained
	 * while probing/parsing — verify refcount balance against
	 * dsa_tree_touch()/dsa_tree_get().
	 */
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
/* Tear down the tree @ds belongs to, free its ports and drop the tree
 * reference.
 */
static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}
/* Unregister @ds from the DSA core under dsa2_mutex. */
void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
/* If the DSA conduit chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA user interfaces from being uppers of
 * the DSA conduit, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *conduit, *user_dev;
	LIST_HEAD(close_list);
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	/* Nothing to unwind for a switch that never finished setup */
	if (!ds->setup)
		goto out;

	rtnl_lock();

	/* Close all conduit interfaces in one batch */
	dsa_switch_for_each_cpu_port(dp, ds)
		list_add(&dp->conduit->close_list, &close_list);

	dev_close_many(&close_list, true);

	dsa_switch_for_each_user_port(dp, ds) {
		conduit = dsa_port_to_conduit(dp);
		user_dev = dp->user;

		netif_device_detach(user_dev);
		netdev_upper_dev_unlink(conduit, user_dev);
	}

	/* Disconnect from further netdevice notifiers on the conduit,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->conduit->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
  1313. #ifdef CONFIG_PM_SLEEP
  1314. static bool dsa_port_is_initialized(const struct dsa_port *dp)
  1315. {
  1316. return dp->type == DSA_PORT_TYPE_USER && dp->user;
  1317. }
  1318. int dsa_switch_suspend(struct dsa_switch *ds)
  1319. {
  1320. struct dsa_port *dp;
  1321. int ret = 0;
  1322. /* Suspend user network devices */
  1323. dsa_switch_for_each_port(dp, ds) {
  1324. if (!dsa_port_is_initialized(dp))
  1325. continue;
  1326. ret = dsa_user_suspend(dp->user);
  1327. if (ret)
  1328. return ret;
  1329. }
  1330. if (ds->ops->suspend)
  1331. ret = ds->ops->suspend(ds);
  1332. return ret;
  1333. }
  1334. EXPORT_SYMBOL_GPL(dsa_switch_suspend);
  1335. int dsa_switch_resume(struct dsa_switch *ds)
  1336. {
  1337. struct dsa_port *dp;
  1338. int ret = 0;
  1339. if (ds->ops->resume)
  1340. ret = ds->ops->resume(ds);
  1341. if (ret)
  1342. return ret;
  1343. /* Resume user network devices */
  1344. dsa_switch_for_each_port(dp, ds) {
  1345. if (!dsa_port_is_initialized(dp))
  1346. continue;
  1347. ret = dsa_user_resume(dp->user);
  1348. if (ret)
  1349. return ret;
  1350. }
  1351. return 0;
  1352. }
  1353. EXPORT_SYMBOL_GPL(dsa_switch_resume);
  1354. #endif
  1355. struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
  1356. {
  1357. if (!netdev || !dsa_user_dev_check(netdev))
  1358. return ERR_PTR(-ENODEV);
  1359. return dsa_user_to_port(netdev);
  1360. }
  1361. EXPORT_SYMBOL_GPL(dsa_port_from_netdev);
/* Compare two address databases for equality. Databases of different
 * types never match; same-type databases are compared on the identifier
 * relevant to that type.
 */
bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		/* Unknown database type: complain, treat as unequal */
		WARN_ON(1);
		return false;
	}
}
/* Check whether unicast address @addr/@vid is installed on @port under
 * a database of the same type as, but not equal to, @db.
 * Caller must hold dp->addr_lists_lock.
 */
bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		/* Same database type but a different database instance */
		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);
/* Check whether multicast entry @mdb is installed on @port under a
 * database of the same type as, but not equal to, @db.
 * Caller must hold dp->addr_lists_lock.
 */
bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		/* Same database type but a different database instance */
		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
/* The operations published through the global dsa_stubs pointer while
 * the module is loaded, and the helpers that set/clear that pointer.
 */
static const struct dsa_stubs __dsa_stubs = {
	.conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate,
};

static void dsa_register_stubs(void)
{
	dsa_stubs = &__dsa_stubs;
}

static void dsa_unregister_stubs(void)
{
	dsa_stubs = NULL;
}
/* Module init: allocate the ordered workqueue, register the user
 * notifier, the DSA packet type and the rtnl link ops, then publish the
 * stubs. Unwound in reverse order on failure.
 */
static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					 WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_user_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	dsa_register_stubs();

	return 0;

netlink_register_fail:
	dsa_user_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	/* falls through to destroy the workqueue as well */
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);
/* Module exit: tear everything down in reverse order of dsa_init_module() */
static void __exit dsa_cleanup_module(void)
{
	dsa_unregister_stubs();
	rtnl_link_unregister(&dsa_link_ops);

	dsa_user_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);
  1454. MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
  1455. MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
  1456. MODULE_LICENSE("GPL");
  1457. MODULE_ALIAS("platform:dsa");