// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/genetlink.h>

#include "internal.h"
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"

LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);
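
/* A channel has link when bit 0 of its cached link status word is set.
 * The word is filled in from Get Link Status responses and link AENs.
 */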
bool ncsi_channel_has_link(struct ncsi_channel *channel)
{
	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
}

bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
			  struct ncsi_channel *channel)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_PACKAGE(ndp, np)
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (nc == channel)
				continue;
			if (nc->state == NCSI_CHANNEL_ACTIVE &&
			    ncsi_channel_has_link(nc))
				return false;
		}

	return true;
}

static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			if (ncsi_channel_has_link(nc)) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}
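
/* The channel monitor periodically sends a Get Link Status command on a
 * configured channel.  The response handler moves the monitor back to the
 * START state; if no response arrives before the WAIT window runs out, the
 * channel is considered dead: link is forced down and the channel is pushed
 * back onto the channel queue for recovery.
 */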
static void ncsi_channel_monitor(struct timer_list *t)
{
	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel_mode *ncm;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	if (!enabled)
		return;	/* expected race disabling timer */
	if (WARN_ON_ONCE(chained))
		goto bad_state;

	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE) {
bad_state:
		netdev_warn(ndp->ndev.dev,
			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
			    nc->id, state, chained ? "on" : "off");
		spin_lock_irqsave(&nc->lock, flags);
		nc->monitor.enabled = false;
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		break;
	default:
		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
			   nc->id);
		ncsi_report_link(ndp, true);
		ndp->flags |= NCSI_DEV_RESHUFFLE;

		ncm = &nc->modes[NCSI_MODE_LINK];
		spin_lock_irqsave(&nc->lock, flags);
		nc->monitor.enabled = false;
		nc->state = NCSI_CHANNEL_INVISIBLE;
		ncm->data[2] &= ~0x1;
		spin_unlock_irqrestore(&nc->lock, flags);

		spin_lock_irqsave(&ndp->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->monitor.enabled);
	nc->monitor.enabled = true;
	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->monitor.enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->monitor.enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	del_timer_sync(&nc->monitor.timer);
}

struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
				       unsigned char id)
{
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_CHANNEL(np, nc) {
		if (nc->id == id)
			return nc;
	}

	return NULL;
}

struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->monitor.enabled = false;
	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}

static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);

	/* Release filters */
	kfree(nc->mac_filter.addrs);
	kfree(nc->vlan_filter.vids);

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}

struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
				       unsigned char id)
{
	struct ncsi_package *np;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (np->id == id)
			return np;
	}

	return NULL;
}

struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);
	np->channel_whitelist = UINT_MAX;

	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}

void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}
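
/* Resolve a combined NCSI channel identifier (package index in the upper
 * bits, channel index in the lower bits) into its package and channel
 * objects; either may come back NULL if not found.
 */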
void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
				   unsigned char id,
				   struct ncsi_package **np,
				   struct ncsi_channel **nc)
{
	struct ncsi_package *p;
	struct ncsi_channel *c;

	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

	if (np)
		*np = p;
	if (nc)
		*nc = c;
}

/* For two consecutive NCSI commands, the packet IDs shouldn't be the same.
 * Otherwise, a bogus response might be matched to the wrong command. So the
 * available IDs are allocated in a round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
					unsigned int req_flags)
{
	struct ncsi_request *nr = NULL;
	int i, limit = ARRAY_SIZE(ndp->requests);
	unsigned long flags;

	/* Check if there is one available request until the ceiling */
	spin_lock_irqsave(&ndp->lock, flags);
	for (i = ndp->request_id; i < limit; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

	/* Fall back and scan from the starting cursor */
	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

found:
	spin_unlock_irqrestore(&ndp->lock, flags);
	return nr;
}

void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}

struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
	struct ncsi_dev_priv *ndp;

	NCSI_FOR_EACH_DEV(ndp) {
		if (ndp->ndev.dev == dev)
			return &ndp->ndev;
	}

	return NULL;
}
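
/* A request timer fires when no response to an NCSI command arrived in time.
 * For requests driven from the netlink interface, user space is told about
 * the timeout before the request slot is released.
 */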
static void ncsi_request_timeout(struct timer_list *t)
{
	struct ncsi_request *nr = from_timer(nr, t, timer);
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct ncsi_cmd_pkt *cmd;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* If the request already has an associated response,
	 * let the response handler release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
		if (nr->cmd) {
			/* Find the package */
			cmd = (struct ncsi_cmd_pkt *)
			      skb_network_header(nr->cmd);
			ncsi_find_package_and_channel(ndp,
						      cmd->cmd.common.channel,
						      &np, &nc);
			ncsi_send_netlink_timeout(nr, np, nc);
		}
	}

	/* Release the request */
	ncsi_free_request(nr);
}
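
/* Tear down the active channel with a short command sequence: select its
 * package, optionally refresh the link state of every channel (when a
 * failover reshuffle is pending), disable network Tx on the channel, disable
 * the channel itself, and finally deselect the package unless another
 * channel on it is still active.
 */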
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc, *tmp;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	np = ndp->active_package;
	nc = ndp->active_channel;
	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		fallthrough;
	case ncsi_dev_state_suspend_select:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* Retrieve the last link states of the channels in the
		 * current package when the active channel needs to fail
		 * over to another one, since we will possibly select
		 * another channel as the next active one. The link states
		 * of the channels are the most important factor in that
		 * selection, so we need them to be accurate. Unfortunately,
		 * the link states of inactive channels can't be updated by
		 * LSC AENs in time.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;
		nca.channel = ndp->channel_probe_id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		ndp->channel_probe_id++;

		if (ndp->channel_probe_id == ndp->channel_count) {
			ndp->channel_probe_id = 0;
			nd->state = ncsi_dev_state_suspend_dcnt;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		NCSI_FOR_EACH_CHANNEL(np, tmp) {
			/* If there is another channel active on this package
			 * do not deselect the package.
			 */
			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
				nd->state = ncsi_dev_state_suspend_done;
				break;
			}
		}
		break;
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		if (ndp->flags & NCSI_DEV_RESET)
			ncsi_reset_dev(nd);
		else
			ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}

/* Check the VLAN filter bitmap for a set filter, and construct a
 * "Set VLAN Filter - Disable" packet if found.
 */
static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
			 struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	unsigned long flags;
	void *bitmap;
	int index;
	u16 vid;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);
	index = find_first_bit(bitmap, ncf->n_vids);
	if (index >= ncf->n_vids) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}
	vid = ncf->vids[index];

	clear_bit(index, bitmap);
	ncf->vids[index] = 0;
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x00;

	return 0;
}

/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 * packet.
 */
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
		       struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	struct vlan_vid *vlan = NULL;
	unsigned long flags;
	int i, index;
	void *bitmap;
	u16 vid;

	if (list_empty(&ndp->vlan_vids))
		return -1;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);

	rcu_read_lock();
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		vid = vlan->vid;
		for (i = 0; i < ncf->n_vids; i++)
			if (ncf->vids[i] == vid) {
				vid = 0;
				break;
			}
		if (vid)
			break;
	}
	rcu_read_unlock();

	if (!vid) {
		/* No new VLAN ID to set */
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	index = find_first_zero_bit(bitmap, ncf->n_vids);
	if (index < 0 || index >= ncf->n_vids) {
		netdev_err(ndp->ndev.dev,
			   "Channel %u already has all VLAN filters set\n",
			   nc->id);
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	ncf->vids[index] = vid;
	set_bit(index, bitmap);
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x01;

	return 0;
}

static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;

	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);

	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;

	/* PHY Link up attribute */
	data[6] = 0x1;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

/* NCSI OEM Command APIs */
static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;

	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
	data[5] = NCSI_OEM_BCM_CMD_GMA;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
{
	union {
		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
	} u;
	int ret = 0;

	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;

	memset(&u, 0, sizeof(u));
	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;

	nca->data = u.data_u8;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
{
	union {
		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
	} u;
	int ret = 0;

	memset(&u, 0, sizeof(u));
	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
	       nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);

	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
	nca->data = u.data_u8;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;

	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
	data[4] = NCSI_OEM_INTEL_CMD_GMA;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

/* OEM Command handlers initialization */
static struct ncsi_oem_gma_handler {
	unsigned int	mfr_id;
	int		(*handler)(struct ncsi_cmd_arg *nca);
} ncsi_oem_gma_handlers[] = {
	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
};
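
/* Dispatch an OEM "Get MAC Address" command to the handler matching the
 * manufacturer ID reported by the channel.
 */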
static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
{
	struct ncsi_oem_gma_handler *nch = NULL;
	int i;

	/* This function should only be called once, return if flag set */
	if (nca->ndp->gma_flag == 1)
		return -1;

	/* Find GMA handler for given manufacturer ID */
	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
			if (ncsi_oem_gma_handlers[i].handler)
				nch = &ncsi_oem_gma_handlers[i];
			break;
		}
	}

	if (!nch) {
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
			   mf_id);
		return -1;
	}

	/* Get MAC address from NCSI device */
	return nch->handler(nca);
}

/* Determine if a given channel from the channel_queue should be used for Tx */
static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
			       struct ncsi_channel *nc)
{
	struct ncsi_channel_mode *ncm;
	struct ncsi_channel *channel;
	struct ncsi_package *np;

	/* Check if any other channel has Tx enabled; a channel may have already
	 * been configured and removed from the channel queue.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!ndp->multi_package && np != nc->package)
			continue;
		NCSI_FOR_EACH_CHANNEL(np, channel) {
			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
			if (ncm->enable)
				return false;
		}
	}

	/* This channel is the preferred channel and has link */
	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
		np = channel->package;
		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			return np->preferred_channel == nc;
		}
	}

	/* This channel has link */
	if (ncsi_channel_has_link(nc))
		return true;

	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
		if (ncsi_channel_has_link(channel))
			return false;

	/* No other channel has link; default to this one */
	return true;
}

/* Change the active Tx channel in a multi-channel setup */
int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
			   struct ncsi_package *package,
			   struct ncsi_channel *disable,
			   struct ncsi_channel *enable)
{
	struct ncsi_cmd_arg nca;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	int ret = 0;

	if (!package->multi_channel && !ndp->multi_package)
		netdev_warn(ndp->ndev.dev,
			    "NCSI: Trying to update Tx channel in single-channel mode\n");
	nca.ndp = ndp;
	nca.req_flags = 0;

	/* Find current channel with Tx enabled */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (disable)
			break;
		if (!ndp->multi_package && np != package)
			continue;

		NCSI_FOR_EACH_CHANNEL(np, nc)
			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
				disable = nc;
				break;
			}
	}

	/* Find a suitable channel for Tx */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (enable)
			break;
		if (!ndp->multi_package && np != package)
			continue;
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;

		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			enable = np->preferred_channel;
			break;
		}

		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & 0x1 << nc->id))
				continue;
			if (nc->state != NCSI_CHANNEL_ACTIVE)
				continue;
			if (ncsi_channel_has_link(nc)) {
				enable = nc;
				break;
			}
		}
	}

	if (disable == enable)
		return -1;

	if (!enable)
		return -1;

	if (disable) {
		nca.channel = disable->id;
		nca.package = disable->package->id;
		nca.type = NCSI_PKT_CMD_DCNT;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev,
				   "Error %d sending DCNT\n",
				   ret);
	}

	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);

	nca.channel = enable->id;
	nca.package = enable->package->id;
	nca.type = NCSI_PKT_CMD_ECNT;
	ret = ncsi_xmit_cmd(&nca);
	if (ret)
		netdev_err(ndp->ndev.dev,
			   "Error %d sending ECNT\n",
			   ret);

	return ret;
}
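
/* Bring the active channel up: select its package, clear the initial state,
 * optionally fetch the MAC address via a GMCMA or OEM command and program it
 * into the netdev, then set up VLAN and MAC filters, broadcast and multicast
 * filtering, enable the channel (and network Tx where this channel should
 * carry it), enable AENs if supported, and finish with a Get Link Status.
 */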
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_channel *hot_nc = NULL;
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_SP\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_CIS\n");
			goto error;
		}

		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
			  ? ncsi_dev_state_config_oem_gma
			  : ncsi_dev_state_config_clear_vids;
		break;
	case ncsi_dev_state_config_oem_gma:
		nd->state = ncsi_dev_state_config_apply_mac;

		nca.package = np->id;
		nca.channel = nc->id;
		ndp->pending_req_num = 1;
		if (nc->version.major >= 1 && nc->version.minor >= 2) {
			nca.type = NCSI_PKT_CMD_GMCMA;
			ret = ncsi_xmit_cmd(&nca);
		} else {
			nca.type = NCSI_PKT_CMD_OEM;
			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
		}
		if (ret < 0) {
			nd->state = ncsi_dev_state_config_clear_vids;
			schedule_work(&ndp->work);
		}

		break;
	case ncsi_dev_state_config_apply_mac:
		rtnl_lock();
		ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
		rtnl_unlock();
		if (ret < 0)
			netdev_warn(dev, "NCSI: Writing MAC address to device failed\n");

		nd->state = ncsi_dev_state_config_clear_vids;

		fallthrough;
	case ncsi_dev_state_config_clear_vids:
	case ncsi_dev_state_config_svf:
	case ncsi_dev_state_config_ev:
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
	case ncsi_dev_state_config_dgmf:
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Clear any active filters on the channel before setting */
		if (nd->state == ncsi_dev_state_config_clear_vids) {
			ret = clear_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_svf;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_clear_vids;
		/* Add known VLAN tags to the filter */
		} else if (nd->state == ncsi_dev_state_config_svf) {
			ret = set_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_ev;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_svf;
		/* Enable/Disable the VLAN filter */
		} else if (nd->state == ncsi_dev_state_config_ev) {
			if (list_empty(&ndp->vlan_vids)) {
				nca.type = NCSI_PKT_CMD_DV;
			} else {
				nca.type = NCSI_PKT_CMD_EV;
				nca.bytes[3] = NCSI_CAP_VLAN_NO;
			}
			nd->state = ncsi_dev_state_config_sma;
		} else if (nd->state == ncsi_dev_state_config_sma) {
			/* Use first entry in unicast filter table. Note that
			 * the MAC filter table starts from entry 1 instead of
			 * 0.
			 */
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[6] = 0x1;
			nca.bytes[7] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			/* if multicast global filtering is supported then
			 * disable it so that all multicast packet will be
			 * forwarded to management controller
			 */
			if (nc->caps[NCSI_CAP_GENERIC].cap &
			    NCSI_CAP_GENERIC_MC)
				nd->state = ncsi_dev_state_config_dgmf;
			else if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_dgmf) {
			nca.type = NCSI_PKT_CMD_DGMF;
			if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			if (np->preferred_channel &&
			    nc != np->preferred_channel)
				netdev_info(ndp->ndev.dev,
					    "NCSI: Tx failed over to channel %u\n",
					    nc->id);
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD %x\n",
				   nca.type);
			goto error;
		}
		break;
	case ncsi_dev_state_config_done:
		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
			   nc->id);
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;

		if (ndp->flags & NCSI_DEV_RESET) {
			/* A reset event happened during config, start it now */
			nc->reconfigure_needed = false;
			spin_unlock_irqrestore(&nc->lock, flags);
			ncsi_reset_dev(nd);
			break;
		}

		if (nc->reconfigure_needed) {
			/* This channel's configuration has been updated
			 * part-way during the config state - start the
			 * channel configuration over
			 */
			nc->reconfigure_needed = false;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
			ncsi_process_next_channel(ndp);
			break;
		}

		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
			hot_nc = nc;
		} else {
			hot_nc = NULL;
			netdev_dbg(ndp->ndev.dev,
				   "NCSI: channel %u link down after config\n",
				   nc->id);
		}
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Update the hot channel */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->hot_channel = hot_nc;
		spin_unlock_irqrestore(&ndp->lock, flags);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
			     nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}
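
/* Walk all whitelisted packages and channels, queueing candidates for
 * configuration.  The previously hot channel and any channel with link up
 * are preferred; when multi-package/multi-channel is enabled, every valid
 * channel is queued so AENs get enabled on all of them.
 */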
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc, *found, *hot_nc;
	struct ncsi_channel_mode *ncm;
	unsigned long flags, cflags;
	struct ncsi_package *np;
	bool with_link;

	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* By default the search is done once an inactive channel with up
	 * link is found, unless a preferred channel is set.
	 * If multi_package or multi_channel are configured all channels in the
	 * whitelist are added to the channel queue.
	 */
	found = NULL;
	with_link = false;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & (0x1 << nc->id)))
				continue;

			spin_lock_irqsave(&nc->lock, cflags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, cflags);
				continue;
			}

			if (!found)
				found = nc;
			if (nc == hot_nc)
				found = nc;

			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				found = nc;
				with_link = true;
			}

			/* If multi_channel is enabled configure all valid
			 * channels whether or not they currently have link
			 * so they will have AENs enabled.
			 */
			if (with_link || np->multi_channel) {
				spin_lock_irqsave(&ndp->lock, flags);
				list_add_tail_rcu(&nc->link,
						  &ndp->channel_queue);
				spin_unlock_irqrestore(&ndp->lock, flags);

				netdev_dbg(ndp->ndev.dev,
					   "NCSI: Channel %u added to queue (link %s)\n",
					   nc->id,
					   ncm->data[2] & 0x1 ? "up" : "down");
			}

			spin_unlock_irqrestore(&nc->lock, cflags);

			if (with_link && !np->multi_channel)
				break;
		}
		if (with_link && !ndp->multi_package)
			break;
	}

	if (list_empty(&ndp->channel_queue) && found) {
		netdev_info(ndp->ndev.dev,
			    "NCSI: No channel with link found, configuring channel %u\n",
			    found->id);
		spin_lock_irqsave(&ndp->lock, flags);
		list_add_tail_rcu(&found->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
	} else if (!found) {
		netdev_warn(ndp->ndev.dev,
			    "NCSI: No channel found to configure!\n");
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

	return ncsi_process_next_channel(ndp);
}

static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned int cap;
	bool has_channel = false;

	/* Hardware arbitration is disabled if any channel doesn't
	 * explicitly support it.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			has_channel = true;

			cap = nc->caps[NCSI_CAP_GENERIC].cap;
			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
				ndp->flags &= ~NCSI_DEV_HWA;
				return false;
			}
		}
	}

	if (has_channel) {
		ndp->flags |= NCSI_DEV_HWA;
		return true;
	}

	ndp->flags &= ~NCSI_DEV_HWA;
	return false;
}
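
/* Probe state machine: deselect every possible package, then select each
 * package in turn and walk its channels with Clear Initial State, Get
 * Version ID, Get Capabilities and Get Link Status, before deselecting it
 * and moving on to the next.  Once every package has been probed, hardware
 * arbitration is checked and an active channel is chosen.
 */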
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		fallthrough;
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		if (ndp->package_probe_id >= 8) {
			/* Last package probed, finishing */
			ndp->flags |= NCSI_DEV_PROBED;
			break;
		}

		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		ndp->active_package = ncsi_find_package(ndp,
							ndp->package_probe_id);
		if (!ndp->active_package) {
			/* No response */
			nd->state = ncsi_dev_state_probe_dp;
			schedule_work(&ndp->work);
			break;
		}
		nd->state = ncsi_dev_state_probe_cis;
		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
		    ndp->mlx_multi_host)
			nd->state = ncsi_dev_state_probe_mlx_gma;

		schedule_work(&ndp->work);
		break;
	case ncsi_dev_state_probe_mlx_gma:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_gma_handler_mlx(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_mlx_smaf;
		break;
	case ncsi_dev_state_probe_mlx_smaf:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_smaf_mlx(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_keep_phy:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_keep_phy_intel(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_cis:
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = 1;

		/* Clear initial state; retrieve version, capabilities or
		 * link status.
		 */
		if (nd->state == ncsi_dev_state_probe_cis)
			nca.type = NCSI_PKT_CMD_CIS;
		else if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		nca.channel = ndp->channel_probe_id;

		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		if (nd->state == ncsi_dev_state_probe_cis) {
			nd->state = ncsi_dev_state_probe_gvi;
			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) &&
			    ndp->channel_probe_id == 0)
				nd->state = ncsi_dev_state_probe_keep_phy;
		} else if (nd->state == ncsi_dev_state_probe_gvi) {
			nd->state = ncsi_dev_state_probe_gc;
		} else if (nd->state == ncsi_dev_state_probe_gc) {
			nd->state = ncsi_dev_state_probe_gls;
		} else {
			nd->state = ncsi_dev_state_probe_cis;
			ndp->channel_probe_id++;
		}

		if (ndp->channel_probe_id == ndp->channel_count) {
			ndp->channel_probe_id = 0;
			nd->state = ncsi_dev_state_probe_dp;
		}
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the current package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Probe next package after receiving response */
		ndp->package_probe_id++;
		nd->state = ncsi_dev_state_probe_package;
		ndp->active_package = NULL;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	if (ndp->flags & NCSI_DEV_PROBED) {
		/* Check if all packages have HWA support */
		ncsi_check_hwa(ndp);
		ncsi_choose_active_channel(ndp);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}

static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
						 struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}
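
/* Pull the next channel off the channel queue and either configure it (if it
 * was inactive) or suspend it (if it was active).  With an empty queue, a
 * pending reshuffle triggers a fresh channel selection; otherwise the current
 * link state is reported.
 */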
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
			   nc->id);
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
			   nc->id);
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   old_state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	ncsi_report_link(ndp, false);
	return -ENODEV;
}
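
/* Force channels through reconfiguration so updated settings (such as the
 * VLAN filter list) get programmed.  Active channels are deactivated and
 * queued; channels that are currently being configured or already queued are
 * only marked dirty and picked up once they finish.  Returns the number of
 * channels actually queued.
 */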
static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	unsigned long flags;
	unsigned int n = 0;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels may be busy, mark dirty instead of
			 * kicking if:
			 * a) not ACTIVE (configured)
			 * b) in the channel_queue (to be configured)
			 * c) its ndev is in the config state
			 */
			if (nc->state != NCSI_CHANNEL_ACTIVE) {
				if ((ndp->ndev.state & 0xff00) ==
				    ncsi_dev_state_config ||
				    !list_empty(&nc->link)) {
					netdev_dbg(nd->dev,
						   "NCSI: channel %p marked dirty\n",
						   nc);
					nc->reconfigure_needed = true;
				}
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			spin_unlock_irqrestore(&nc->lock, flags);

			ncsi_stop_channel_monitor(nc);
			spin_lock_irqsave(&nc->lock, flags);
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
			n++;
		}
	}

	return n;
}
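
/* Handler suitable for a driver's .ndo_vlan_rx_add_vid: remember the VLAN ID
 * on the internal list and kick the channels so the updated VLAN filter gets
 * programmed.
 */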
  1419. int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
  1420. {
  1421. struct ncsi_dev_priv *ndp;
  1422. unsigned int n_vids = 0;
  1423. struct vlan_vid *vlan;
  1424. struct ncsi_dev *nd;
  1425. bool found = false;
  1426. if (vid == 0)
  1427. return 0;
  1428. nd = ncsi_find_dev(dev);
  1429. if (!nd) {
  1430. netdev_warn(dev, "NCSI: No net_device?\n");
  1431. return 0;
  1432. }
  1433. ndp = TO_NCSI_DEV_PRIV(nd);
  1434. /* Add the VLAN id to our internal list */
  1435. list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
  1436. n_vids++;
  1437. if (vlan->vid == vid) {
  1438. netdev_dbg(dev, "NCSI: vid %u already registered\n",
  1439. vid);
  1440. return 0;
  1441. }
  1442. }
  1443. if (n_vids >= NCSI_MAX_VLAN_VIDS) {
  1444. netdev_warn(dev,
  1445. "tried to add vlan id %u but NCSI max already registered (%u)\n",
  1446. vid, NCSI_MAX_VLAN_VIDS);
  1447. return -ENOSPC;
  1448. }
  1449. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  1450. if (!vlan)
  1451. return -ENOMEM;
  1452. vlan->proto = proto;
  1453. vlan->vid = vid;
  1454. list_add_rcu(&vlan->list, &ndp->vlan_vids);
  1455. netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
  1456. found = ncsi_kick_channels(ndp) != 0;
  1457. return found ? ncsi_process_next_channel(ndp) : 0;
  1458. }
  1459. EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);

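/* Typical driver usage of the registration API below, as an illustrative
 * sketch only (names other than the ncsi_* calls are hypothetical):
 * register once at probe time, start/stop around the netdev open/close
 * path, and unregister on removal.
 *
 *	priv->ncsi_dev = ncsi_register_dev(netdev, foo_ncsi_handler);
 *	if (!priv->ncsi_dev)
 *		return -ENODEV;
 *	...
 *	ncsi_start_dev(priv->ncsi_dev);		// from ndo_open
 *	ncsi_stop_dev(priv->ncsi_dev);		// from ndo_stop
 *	ncsi_unregister_dev(priv->ncsi_dev);	// at remove time
 */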
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	struct platform_device *pdev;
	struct device_node *np;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);
	ndp->package_whitelist = UINT_MAX;

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
	}
	ndp->channel_count = NCSI_RESERVED_CHANNEL;

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	pdev = to_platform_device(dev->dev.parent);
	if (pdev) {
		np = pdev->dev.of_node;
		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
			   of_property_read_bool(np, "mlx,multi-host")))
			ndp->mlx_multi_host = true;
	}

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);

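/* Kick off NCSI processing for a registered device. The first call (before
 * NCSI_DEV_PROBED is set) starts package/channel probing from the work
 * handler; later calls go through ncsi_reset_dev() to suspend any active
 * channel and re-run channel selection.
 */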
int ncsi_start_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);

	if (nd->state != ncsi_dev_state_registered &&
	    nd->state != ncsi_dev_state_functional)
		return -ENOTTY;

	if (!(ndp->flags & NCSI_DEV_PROBED)) {
		ndp->package_probe_id = 0;
		ndp->channel_probe_id = 0;
		nd->state = ncsi_dev_state_probe;
		schedule_work(&ndp->work);
		return 0;
	}

	return ncsi_reset_dev(nd);
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);

void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor on any active channels. Don't reset the
	 * channel state so we know which were active when ncsi_start_dev()
	 * is next called.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			spin_unlock_irqrestore(&nc->lock, flags);

			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
	ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);

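/* Reset the NCSI state machine. If a suspend or configure sequence is
 * already in flight, just set NCSI_DEV_RESET and let that sequence notice
 * it; otherwise drop any queued channels and either re-run channel
 * selection directly (no active channel) or schedule a suspend of the
 * active channel so the reset continues from the work handler.
 */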
int ncsi_reset_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_channel *nc, *active, *tmp;
	struct ncsi_package *np;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);

	if (!(ndp->flags & NCSI_DEV_RESET)) {
		/* Haven't been called yet, check states */
		switch (nd->state & ncsi_dev_state_major) {
		case ncsi_dev_state_registered:
		case ncsi_dev_state_probe:
			/* Not even probed yet - do nothing */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		case ncsi_dev_state_suspend:
		case ncsi_dev_state_config:
			/* Wait for the channel to finish its suspend/config
			 * operation; once it finishes it will check for
			 * NCSI_DEV_RESET and reset the state.
			 */
			ndp->flags |= NCSI_DEV_RESET;
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	} else {
		switch (nd->state) {
		case ncsi_dev_state_suspend_done:
		case ncsi_dev_state_config_done:
		case ncsi_dev_state_functional:
			/* Ok */
			break;
		default:
			/* Current reset operation happening */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	}

	if (!list_empty(&ndp->channel_queue)) {
		/* Clear any channel queue we may have interrupted */
		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
			list_del_init(&nc->link);
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	active = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (nc->state == NCSI_CHANNEL_ACTIVE) {
				active = nc;
				nc->state = NCSI_CHANNEL_INVISIBLE;
				spin_unlock_irqrestore(&nc->lock, flags);
				ncsi_stop_channel_monitor(nc);
				break;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
		if (active)
			break;
	}

	if (!active) {
		/* Done */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->flags &= ~NCSI_DEV_RESET;
		spin_unlock_irqrestore(&ndp->lock, flags);
		return ncsi_choose_active_channel(ndp);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	ndp->flags |= NCSI_DEV_RESET;
	ndp->active_channel = active;
	ndp->active_package = active->package;
	spin_unlock_irqrestore(&ndp->lock, flags);

	nd->state = ncsi_dev_state_suspend;
	schedule_work(&ndp->work);
	return 0;
}

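/* Tear down a device registered with ncsi_register_dev(): unhook the
 * ETH_P_NCSI packet handler, free all packages and channels, drop the
 * device from the global list, and disable and flush the work item before
 * freeing the private state.
 */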
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	disable_work_sync(&ndp->work);

	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);