  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
  4. * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
  5. */
  6. #include <linux/device.h>
  7. #include <net/genetlink.h>
  8. #include <net/sock.h>
  9. #include "devl_internal.h"
/* Context for a device-info request: optionally collects versions into a
 * netlink message and/or forwards each reported version to a callback.
 */
struct devlink_info_req {
	struct sk_buff *msg;	/* may be NULL when only the callback is wanted */
	void (*version_cb)(const char *version_name,
			   enum devlink_info_version_type version_type,
			   void *version_cb_priv);
	void *version_cb_priv;	/* opaque argument handed back to version_cb */
};
/* One (reload action, reload limit) pair; used to describe combinations
 * that can never be honored together.
 */
struct devlink_reload_combination {
	enum devlink_reload_action action;
	enum devlink_reload_limit limit;
};
/* Table of (action, limit) pairs that are inherently contradictory. */
static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = {
	{
		/* can't reinitialize driver with no down time */
		.action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
		.limit = DEVLINK_RELOAD_LIMIT_NO_RESET,
	},
};
  28. static bool
  29. devlink_reload_combination_is_invalid(enum devlink_reload_action action,
  30. enum devlink_reload_limit limit)
  31. {
  32. int i;
  33. for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)
  34. if (devlink_reload_invalid_combinations[i].action == action &&
  35. devlink_reload_invalid_combinations[i].limit == limit)
  36. return true;
  37. return false;
  38. }
  39. static bool
  40. devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action)
  41. {
  42. return test_bit(action, &devlink->ops->reload_actions);
  43. }
  44. static bool
  45. devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit)
  46. {
  47. return test_bit(limit, &devlink->ops->reload_limits);
  48. }
  49. static int devlink_reload_stat_put(struct sk_buff *msg,
  50. enum devlink_reload_limit limit, u32 value)
  51. {
  52. struct nlattr *reload_stats_entry;
  53. reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY);
  54. if (!reload_stats_entry)
  55. return -EMSGSIZE;
  56. if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
  57. nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
  58. goto nla_put_failure;
  59. nla_nest_end(msg, reload_stats_entry);
  60. return 0;
  61. nla_put_failure:
  62. nla_nest_cancel(msg, reload_stats_entry);
  63. return -EMSGSIZE;
  64. }
/* Emit reload statistics into @msg.  With @is_remote false this nests
 * DEVLINK_ATTR_RELOAD_STATS and only shows locally supported actions and
 * limits; with @is_remote true it nests DEVLINK_ATTR_REMOTE_RELOAD_STATS
 * and shows all actions.  Returns 0 or -EMSGSIZE (all nests cancelled).
 */
static int
devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
{
	struct nlattr *reload_stats_attr, *act_info, *act_stats;
	int i, j, stat_idx;
	u32 value;

	/* Outer nest attribute differs for local vs. remote statistics. */
	if (!is_remote)
		reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS);
	else
		reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS);

	if (!reload_stats_attr)
		return -EMSGSIZE;

	for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
		/* Local stats cover only actions the driver supports. */
		if ((!is_remote &&
		     !devlink_reload_action_is_supported(devlink, i)) ||
		    i == DEVLINK_RELOAD_ACTION_UNSPEC)
			continue;
		act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO);
		if (!act_info)
			goto nla_put_failure;

		if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i))
			goto action_info_nest_cancel;
		act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS);
		if (!act_stats)
			goto action_info_nest_cancel;

		for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
			/* Remote stats are shown even if not locally supported.
			 * Stats of actions with unspecified limit are shown
			 * though drivers don't need to register unspecified
			 * limit.
			 */
			if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
			     !devlink_reload_limit_is_supported(devlink, j)) ||
			    devlink_reload_combination_is_invalid(i, j))
				continue;

			/* Counters are kept in a flat [limit][action] array. */
			stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i;
			if (!is_remote)
				value = devlink->stats.reload_stats[stat_idx];
			else
				value = devlink->stats.remote_reload_stats[stat_idx];
			if (devlink_reload_stat_put(msg, j, value))
				goto action_stats_nest_cancel;
		}
		nla_nest_end(msg, act_stats);
		nla_nest_end(msg, act_info);
	}
	nla_nest_end(msg, reload_stats_attr);
	return 0;

action_stats_nest_cancel:
	nla_nest_cancel(msg, act_stats);
action_info_nest_cancel:
	nla_nest_cancel(msg, act_info);
nla_put_failure:
	nla_nest_cancel(msg, reload_stats_attr);
	return -EMSGSIZE;
}
  121. static int devlink_nl_nested_fill(struct sk_buff *msg, struct devlink *devlink)
  122. {
  123. unsigned long rel_index;
  124. void *unused;
  125. int err;
  126. xa_for_each(&devlink->nested_rels, rel_index, unused) {
  127. err = devlink_rel_devlink_handle_put(msg, devlink,
  128. rel_index,
  129. DEVLINK_ATTR_NESTED_DEVLINK,
  130. NULL);
  131. if (err)
  132. return err;
  133. }
  134. return 0;
  135. }
/* Fill one devlink instance message: handle, reload-failed flag, the
 * DEVLINK_ATTR_DEV_STATS nest (local and remote reload stats) and any
 * nested devlink handles.  Returns 0 or -EMSGSIZE with @msg unwound.
 */
static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
			   enum devlink_command cmd, u32 portid,
			   u32 seq, int flags)
{
	struct nlattr *dev_stats;
	void *hdr;

	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	if (devlink_nl_put_handle(msg, devlink))
		goto nla_put_failure;
	if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
		goto nla_put_failure;

	dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS);
	if (!dev_stats)
		goto nla_put_failure;
	if (devlink_reload_stats_put(msg, devlink, false))
		goto dev_stats_nest_cancel;
	if (devlink_reload_stats_put(msg, devlink, true))
		goto dev_stats_nest_cancel;
	nla_nest_end(msg, dev_stats);

	if (devlink_nl_nested_fill(msg, devlink))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	return 0;

dev_stats_nest_cancel:
	nla_nest_cancel(msg, dev_stats);
nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
  167. static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
  168. {
  169. struct sk_buff *msg;
  170. int err;
  171. WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
  172. WARN_ON(!devl_is_registered(devlink));
  173. if (!devlink_nl_notify_need(devlink))
  174. return;
  175. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  176. if (!msg)
  177. return;
  178. err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0);
  179. if (err) {
  180. nlmsg_free(msg);
  181. return;
  182. }
  183. devlink_nl_notify_send(devlink, msg);
  184. }
  185. int devlink_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
  186. {
  187. struct devlink *devlink = info->user_ptr[0];
  188. struct sk_buff *msg;
  189. int err;
  190. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  191. if (!msg)
  192. return -ENOMEM;
  193. err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
  194. info->snd_portid, info->snd_seq, 0);
  195. if (err) {
  196. nlmsg_free(msg);
  197. return err;
  198. }
  199. return genlmsg_reply(msg, info);
  200. }
  201. static int
  202. devlink_nl_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
  203. struct netlink_callback *cb, int flags)
  204. {
  205. return devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
  206. NETLINK_CB(cb->skb).portid,
  207. cb->nlh->nlmsg_seq, flags);
  208. }
/* DEVLINK_CMD_GET dumpit handler: iterate instances via the generic
 * devlink dump helper, filling each with devlink_nl_get_dump_one().
 */
int devlink_nl_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	return devlink_nl_dumpit(msg, cb, devlink_nl_get_dump_one);
}
/* Relationship callback: re-announce the instance so userspace sees the
 * updated nesting information (@obj_index is unused here).
 */
static void devlink_rel_notify_cb(struct devlink *devlink, u32 obj_index)
{
	devlink_notify(devlink, DEVLINK_CMD_NEW);
}
/* Relationship cleanup callback: drop the nested-relationship index from
 * this instance's xarray (@obj_index is unused here).
 */
static void devlink_rel_cleanup_cb(struct devlink *devlink, u32 obj_index,
				   u32 rel_index)
{
	xa_erase(&devlink->nested_rels, rel_index);
}
/**
 * devl_nested_devlink_set - record that @nested_devlink is nested in @devlink
 * @devlink: parent devlink instance
 * @nested_devlink: devlink instance nested under @devlink
 *
 * Registers a nested-in relationship and stores its index in the parent's
 * nested_rels xarray so it is reported in instance messages.
 *
 * Return: 0 on success or a negative error code.
 */
int devl_nested_devlink_set(struct devlink *devlink,
			    struct devlink *nested_devlink)
{
	u32 rel_index;
	int err;

	err = devlink_rel_nested_in_add(&rel_index, devlink->index, 0,
					devlink_rel_notify_cb,
					devlink_rel_cleanup_cb,
					nested_devlink);
	if (err)
		return err;
	/* Value is unused; the xarray acts as a set of rel indices. */
	return xa_insert(&devlink->nested_rels, rel_index,
			 xa_mk_value(0), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(devl_nested_devlink_set);
/* Announce the devlink instance and all of its sub-objects to userspace.
 * The per-object order here is mirrored, reversed, in
 * devlink_notify_unregister().
 */
void devlink_notify_register(struct devlink *devlink)
{
	devlink_notify(devlink, DEVLINK_CMD_NEW);
	devlink_linecards_notify_register(devlink);
	devlink_ports_notify_register(devlink);
	devlink_trap_policers_notify_register(devlink);
	devlink_trap_groups_notify_register(devlink);
	devlink_traps_notify_register(devlink);
	devlink_rates_notify_register(devlink);
	devlink_regions_notify_register(devlink);
	devlink_params_notify_register(devlink);
}
/* Withdraw all sub-object notifications and finally the instance itself,
 * in exact reverse order of devlink_notify_register().
 */
void devlink_notify_unregister(struct devlink *devlink)
{
	devlink_params_notify_unregister(devlink);
	devlink_regions_notify_unregister(devlink);
	devlink_rates_notify_unregister(devlink);
	devlink_traps_notify_unregister(devlink);
	devlink_trap_groups_notify_unregister(devlink);
	devlink_trap_policers_notify_unregister(devlink);
	devlink_ports_notify_unregister(devlink);
	devlink_linecards_notify_unregister(devlink);
	devlink_notify(devlink, DEVLINK_CMD_DEL);
}
  261. static void devlink_reload_failed_set(struct devlink *devlink,
  262. bool reload_failed)
  263. {
  264. if (devlink->reload_failed == reload_failed)
  265. return;
  266. devlink->reload_failed = reload_failed;
  267. devlink_notify(devlink, DEVLINK_CMD_NEW);
  268. }
/**
 * devlink_is_reload_failed - report whether the last reload failed
 * @devlink: devlink instance
 *
 * Return: true if the most recent reload attempt left the device in a
 * failed state.
 */
bool devlink_is_reload_failed(const struct devlink *devlink)
{
	return devlink->reload_failed;
}
EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
  274. static void
  275. __devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats,
  276. enum devlink_reload_limit limit, u32 actions_performed)
  277. {
  278. unsigned long actions = actions_performed;
  279. int stat_idx;
  280. int action;
  281. for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) {
  282. stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action;
  283. reload_stats[stat_idx]++;
  284. }
  285. devlink_notify(devlink, DEVLINK_CMD_NEW);
  286. }
/* Update the local (non-remote) reload statistics for @limit. */
static void
devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit,
			    u32 actions_performed)
{
	__devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit,
				      actions_performed);
}
/**
 * devlink_remote_reload_actions_performed - Update devlink on reload actions
 * performed which are not a direct result of devlink reload call.
 *
 * This should be called by a driver after performing reload actions in case it was not
 * a result of devlink reload call. For example fw_activate was performed as a result
 * of devlink reload triggered fw_activate on another host.
 * The motivation for this function is to keep data on reload actions performed on this
 * device whether it was done due to direct devlink reload call or not.
 *
 * @devlink: devlink
 * @limit: reload limit
 * @actions_performed: bitmask of actions performed
 */
void devlink_remote_reload_actions_performed(struct devlink *devlink,
					     enum devlink_reload_limit limit,
					     u32 actions_performed)
{
	/* Reject malformed driver input: no actions, UNSPEC/out-of-range
	 * action bits, or an out-of-range limit.
	 */
	if (WARN_ON(!actions_performed ||
		    actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
		    actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) ||
		    limit > DEVLINK_RELOAD_LIMIT_MAX))
		return;

	__devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit,
				      actions_performed);
}
EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed);
/* Resolve the destination network namespace from exactly one of the
 * DEVLINK_ATTR_NETNS_{PID,FD,ID} attributes (caller guarantees at least
 * one is present).  On success returns a netns whose reference count was
 * elevated; the caller must release it with put_net().  On failure
 * returns an ERR_PTR and sets an extack message.
 */
static struct net *devlink_netns_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID];
	struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD];
	struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID];
	struct net *net;

	/* The three identifying attributes are mutually exclusive. */
	if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) {
		NL_SET_ERR_MSG(info->extack, "multiple netns identifying attributes specified");
		return ERR_PTR(-EINVAL);
	}

	if (netns_pid_attr) {
		net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr));
	} else if (netns_fd_attr) {
		net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr));
	} else if (netns_id_attr) {
		net = get_net_ns_by_id(sock_net(skb->sk),
				       nla_get_u32(netns_id_attr));
		if (!net)
			net = ERR_PTR(-EINVAL);
	} else {
		WARN_ON(1);	/* caller promised one attribute is set */
		net = ERR_PTR(-EINVAL);
	}
	if (IS_ERR(net)) {
		NL_SET_ERR_MSG(info->extack, "Unknown network namespace");
		return ERR_PTR(-EINVAL);
	}
	/* Require CAP_NET_ADMIN in the target namespace's user ns. */
	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
		put_net(net);
		return ERR_PTR(-EPERM);
	}
	return net;
}
/* Move @devlink from @curr_net to @dest_net as part of a reload. */
static void devlink_reload_netns_change(struct devlink *devlink,
					struct net *curr_net,
					struct net *dest_net)
{
	/* Userspace needs to be notified about devlink objects
	 * removed from original and entering new network namespace.
	 * The rest of the devlink objects are re-created during
	 * reload process so the notifications are generated separately.
	 */
	devlink_notify_unregister(devlink);
	write_pnet(&devlink->_net, dest_net);
	devlink_notify_register(devlink);
	devlink_rel_nested_in_notify(devlink);
}
/* After a DRIVER_REINIT tear-down, every sub-object list must be empty;
 * a driver that leaked objects triggers a WARN here.
 */
static void devlink_reload_reinit_sanity_check(struct devlink *devlink)
{
	WARN_ON(!list_empty(&devlink->trap_policer_list));
	WARN_ON(!list_empty(&devlink->trap_group_list));
	WARN_ON(!list_empty(&devlink->trap_list));
	WARN_ON(!list_empty(&devlink->dpipe_table_list));
	WARN_ON(!list_empty(&devlink->sb_list));
	WARN_ON(!list_empty(&devlink->rate_list));
	WARN_ON(!list_empty(&devlink->linecard_list));
	WARN_ON(!xa_empty(&devlink->ports));
}
/* Core reload sequence: reload_down -> optional netns move / reinit
 * bookkeeping -> reload_up -> stats update.  @actions_performed is filled
 * by the driver's reload_up callback.  Returns 0 or a negative error.
 */
int devlink_reload(struct devlink *devlink, struct net *dest_net,
		   enum devlink_reload_action action,
		   enum devlink_reload_limit limit,
		   u32 *actions_performed, struct netlink_ext_ack *extack)
{
	u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
	struct net *curr_net;
	int err;

	/* Make sure the reload operations are invoked with the device lock
	 * held to allow drivers to trigger functionality that expects it
	 * (e.g., PCI reset) and to close possible races between these
	 * operations and probe/remove.
	 */
	device_lock_assert(devlink->dev);

	/* Snapshot remote stats so we can detect a driver that updates
	 * them from within the reload path (see WARN_ON below).
	 */
	memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
	       sizeof(remote_reload_stats));

	err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
	if (err)
		return err;

	curr_net = devlink_net(devlink);
	if (dest_net && !net_eq(dest_net, curr_net))
		devlink_reload_netns_change(devlink, curr_net, dest_net);

	if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT) {
		devlink_params_driverinit_load_new(devlink);
		devlink_reload_reinit_sanity_check(devlink);
	}

	err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
	devlink_reload_failed_set(devlink, !!err);
	if (err)
		return err;

	/* The requested action must be among those reported as performed. */
	WARN_ON(!(*actions_performed & BIT(action)));
	/* Catch driver on updating the remote action within devlink reload */
	WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
		       sizeof(remote_reload_stats)));
	devlink_reload_stats_update(devlink, limit, *actions_performed);
	return 0;
}
/* Send a unicast reply carrying DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED as
 * a bitfield32 (selector == value: all reported bits are meaningful).
 */
static int
devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed,
					enum devlink_command cmd, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd);
	if (!hdr)
		goto free_msg;

	if (devlink_nl_put_handle(msg, devlink))
		goto nla_put_failure;

	if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed,
			       actions_performed))
		goto nla_put_failure;
	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

nla_put_failure:
	genlmsg_cancel(msg, hdr);
free_msg:
	nlmsg_free(msg);
	return -EMSGSIZE;
}
  442. int devlink_nl_reload_doit(struct sk_buff *skb, struct genl_info *info)
  443. {
  444. struct devlink *devlink = info->user_ptr[0];
  445. enum devlink_reload_action action;
  446. enum devlink_reload_limit limit;
  447. struct net *dest_net = NULL;
  448. u32 actions_performed;
  449. int err;
  450. err = devlink_resources_validate(devlink, NULL, info);
  451. if (err) {
  452. NL_SET_ERR_MSG(info->extack, "resources size validation failed");
  453. return err;
  454. }
  455. if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
  456. action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
  457. else
  458. action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
  459. if (!devlink_reload_action_is_supported(devlink, action)) {
  460. NL_SET_ERR_MSG(info->extack, "Requested reload action is not supported by the driver");
  461. return -EOPNOTSUPP;
  462. }
  463. limit = DEVLINK_RELOAD_LIMIT_UNSPEC;
  464. if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) {
  465. struct nla_bitfield32 limits;
  466. u32 limits_selected;
  467. limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]);
  468. limits_selected = limits.value & limits.selector;
  469. if (!limits_selected) {
  470. NL_SET_ERR_MSG(info->extack, "Invalid limit selected");
  471. return -EINVAL;
  472. }
  473. for (limit = 0 ; limit <= DEVLINK_RELOAD_LIMIT_MAX ; limit++)
  474. if (limits_selected & BIT(limit))
  475. break;
  476. /* UAPI enables multiselection, but currently it is not used */
  477. if (limits_selected != BIT(limit)) {
  478. NL_SET_ERR_MSG(info->extack, "Multiselection of limit is not supported");
  479. return -EOPNOTSUPP;
  480. }
  481. if (!devlink_reload_limit_is_supported(devlink, limit)) {
  482. NL_SET_ERR_MSG(info->extack, "Requested limit is not supported by the driver");
  483. return -EOPNOTSUPP;
  484. }
  485. if (devlink_reload_combination_is_invalid(action, limit)) {
  486. NL_SET_ERR_MSG(info->extack, "Requested limit is invalid for this action");
  487. return -EINVAL;
  488. }
  489. }
  490. if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
  491. info->attrs[DEVLINK_ATTR_NETNS_FD] ||
  492. info->attrs[DEVLINK_ATTR_NETNS_ID]) {
  493. dest_net = devlink_netns_get(skb, info);
  494. if (IS_ERR(dest_net))
  495. return PTR_ERR(dest_net);
  496. if (!net_eq(dest_net, devlink_net(devlink)) &&
  497. action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT) {
  498. NL_SET_ERR_MSG_MOD(info->extack,
  499. "Changing namespace is only supported for reinit action");
  500. return -EOPNOTSUPP;
  501. }
  502. }
  503. err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
  504. if (dest_net)
  505. put_net(dest_net);
  506. if (err)
  507. return err;
  508. /* For backward compatibility generate reply only if attributes used by user */
  509. if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS])
  510. return 0;
  511. return devlink_nl_reload_actions_performed_snd(devlink, actions_performed,
  512. DEVLINK_CMD_RELOAD, info);
  513. }
/* Validate the reload_actions/reload_limits a driver registered in its
 * devlink_ops; called at registration time.  Returns false (after a
 * WARN) for an inconsistent definition.
 */
bool devlink_reload_actions_valid(const struct devlink_ops *ops)
{
	const struct devlink_reload_combination *comb;
	int i;

	if (!devlink_reload_supported(ops)) {
		/* No reload callbacks - no actions may be advertised. */
		if (WARN_ON(ops->reload_actions))
			return false;
		return true;
	}

	if (WARN_ON(!ops->reload_actions ||
		    ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
		    ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX)))
		return false;

	if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) ||
		    ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX)))
		return false;

	/* Reject ops whose only possible combination is known-invalid. */
	for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) {
		comb = &devlink_reload_invalid_combinations[i];
		if (ops->reload_actions == BIT(comb->action) &&
		    ops->reload_limits == BIT(comb->limit))
			return false;
	}
	return true;
}
/* Fill an eswitch message: mode, inline-mode and encap-mode, each emitted
 * only when the driver implements the corresponding get callback.
 * Returns 0 or the first error (message cancelled on failure).
 */
static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
				   enum devlink_command cmd, u32 portid,
				   u32 seq, int flags)
{
	const struct devlink_ops *ops = devlink->ops;
	enum devlink_eswitch_encap_mode encap_mode;
	u8 inline_mode;
	void *hdr;
	int err = 0;
	u16 mode;

	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	err = devlink_nl_put_handle(msg, devlink);
	if (err)
		goto nla_put_failure;

	if (ops->eswitch_mode_get) {
		err = ops->eswitch_mode_get(devlink, &mode);
		if (err)
			goto nla_put_failure;
		err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
		if (err)
			goto nla_put_failure;
	}

	if (ops->eswitch_inline_mode_get) {
		err = ops->eswitch_inline_mode_get(devlink, &inline_mode);
		if (err)
			goto nla_put_failure;
		err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE,
				 inline_mode);
		if (err)
			goto nla_put_failure;
	}

	if (ops->eswitch_encap_mode_get) {
		err = ops->eswitch_encap_mode_get(devlink, &encap_mode);
		if (err)
			goto nla_put_failure;
		err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode);
		if (err)
			goto nla_put_failure;
	}

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return err;
}
  585. int devlink_nl_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info)
  586. {
  587. struct devlink *devlink = info->user_ptr[0];
  588. struct sk_buff *msg;
  589. int err;
  590. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  591. if (!msg)
  592. return -ENOMEM;
  593. err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET,
  594. info->snd_portid, info->snd_seq, 0);
  595. if (err) {
  596. nlmsg_free(msg);
  597. return err;
  598. }
  599. return genlmsg_reply(msg, info);
  600. }
/* DEVLINK_CMD_ESWITCH_SET doit handler: apply mode, inline-mode and
 * encap-mode independently; each attribute requires the matching driver
 * callback.  Earlier settings are not rolled back if a later one fails.
 */
int devlink_nl_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct devlink *devlink = info->user_ptr[0];
	const struct devlink_ops *ops = devlink->ops;
	enum devlink_eswitch_encap_mode encap_mode;
	u8 inline_mode;
	int err = 0;
	u16 mode;

	if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
		if (!ops->eswitch_mode_set)
			return -EOPNOTSUPP;
		mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
		/* Mode change is refused while rate nodes exist. */
		err = devlink_rate_nodes_check(devlink, mode, info->extack);
		if (err)
			return err;
		err = ops->eswitch_mode_set(devlink, mode, info->extack);
		if (err)
			return err;
	}

	if (info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) {
		if (!ops->eswitch_inline_mode_set)
			return -EOPNOTSUPP;
		inline_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
		err = ops->eswitch_inline_mode_set(devlink, inline_mode,
						   info->extack);
		if (err)
			return err;
	}

	if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) {
		if (!ops->eswitch_encap_mode_set)
			return -EOPNOTSUPP;
		encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
		err = ops->eswitch_encap_mode_set(devlink, encap_mode,
						  info->extack);
		if (err)
			return err;
	}

	return 0;
}
/**
 * devlink_info_serial_number_put - put the device serial number
 * @req: info request context
 * @sn: serial number string
 *
 * A no-op when the request collects versions only (req->msg == NULL).
 */
int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
{
	if (!req->msg)
		return 0;
	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
}
EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
/**
 * devlink_info_board_serial_number_put - put the board serial number
 * @req: info request context
 * @bsn: board serial number string
 *
 * A no-op when the request collects versions only (req->msg == NULL).
 */
int devlink_info_board_serial_number_put(struct devlink_info_req *req,
					 const char *bsn)
{
	if (!req->msg)
		return 0;
	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER,
			      bsn);
}
EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put);
/* Common backend for the version put helpers: invoke the optional
 * version callback, then (when a message is being built) nest a
 * name/value pair under @attr.  Returns 0 or a netlink put error.
 */
static int devlink_info_version_put(struct devlink_info_req *req, int attr,
				    const char *version_name,
				    const char *version_value,
				    enum devlink_info_version_type version_type)
{
	struct nlattr *nest;
	int err;

	/* The callback always sees the version, even without a message. */
	if (req->version_cb)
		req->version_cb(version_name, version_type,
				req->version_cb_priv);

	if (!req->msg)
		return 0;

	nest = nla_nest_start_noflag(req->msg, attr);
	if (!nest)
		return -EMSGSIZE;

	err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
			     version_name);
	if (err)
		goto nla_put_failure;

	err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
			     version_value);
	if (err)
		goto nla_put_failure;

	nla_nest_end(req->msg, nest);

	return 0;

nla_put_failure:
	nla_nest_cancel(req->msg, nest);
	return err;
}
/* Report a fixed (board/asic level, never changing) version. */
int devlink_info_version_fixed_put(struct devlink_info_req *req,
				   const char *version_name,
				   const char *version_value)
{
	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
					version_name, version_value,
					DEVLINK_INFO_VERSION_TYPE_NONE);
}
EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put);
/* Report a stored (flashed, pending activation) version. */
int devlink_info_version_stored_put(struct devlink_info_req *req,
				    const char *version_name,
				    const char *version_value)
{
	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
					version_name, version_value,
					DEVLINK_INFO_VERSION_TYPE_NONE);
}
EXPORT_SYMBOL_GPL(devlink_info_version_stored_put);
/* Like devlink_info_version_stored_put() but with an explicit version
 * type forwarded to the version callback.
 */
int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
					const char *version_name,
					const char *version_value,
					enum devlink_info_version_type version_type)
{
	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
					version_name, version_value,
					version_type);
}
EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext);
/* Report a running (currently active) version. */
int devlink_info_version_running_put(struct devlink_info_req *req,
				     const char *version_name,
				     const char *version_value)
{
	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
					version_name, version_value,
					DEVLINK_INFO_VERSION_TYPE_NONE);
}
EXPORT_SYMBOL_GPL(devlink_info_version_running_put);
  722. int devlink_info_version_running_put_ext(struct devlink_info_req *req,
  723. const char *version_name,
  724. const char *version_value,
  725. enum devlink_info_version_type version_type)
  726. {
  727. return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
  728. version_name, version_value,
  729. version_type);
  730. }
  731. EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext);
  732. static int devlink_nl_driver_info_get(struct device_driver *drv,
  733. struct devlink_info_req *req)
  734. {
  735. if (!drv)
  736. return 0;
  737. if (drv->name[0])
  738. return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME,
  739. drv->name);
  740. return 0;
  741. }
  742. static int
  743. devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink,
  744. enum devlink_command cmd, u32 portid,
  745. u32 seq, int flags, struct netlink_ext_ack *extack)
  746. {
  747. struct device *dev = devlink_to_dev(devlink);
  748. struct devlink_info_req req = {};
  749. void *hdr;
  750. int err;
  751. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  752. if (!hdr)
  753. return -EMSGSIZE;
  754. err = -EMSGSIZE;
  755. if (devlink_nl_put_handle(msg, devlink))
  756. goto err_cancel_msg;
  757. req.msg = msg;
  758. if (devlink->ops->info_get) {
  759. err = devlink->ops->info_get(devlink, &req, extack);
  760. if (err)
  761. goto err_cancel_msg;
  762. }
  763. err = devlink_nl_driver_info_get(dev->driver, &req);
  764. if (err)
  765. goto err_cancel_msg;
  766. genlmsg_end(msg, hdr);
  767. return 0;
  768. err_cancel_msg:
  769. genlmsg_cancel(msg, hdr);
  770. return err;
  771. }
  772. int devlink_nl_info_get_doit(struct sk_buff *skb, struct genl_info *info)
  773. {
  774. struct devlink *devlink = info->user_ptr[0];
  775. struct sk_buff *msg;
  776. int err;
  777. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  778. if (!msg)
  779. return -ENOMEM;
  780. err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
  781. info->snd_portid, info->snd_seq, 0,
  782. info->extack);
  783. if (err) {
  784. nlmsg_free(msg);
  785. return err;
  786. }
  787. return genlmsg_reply(msg, info);
  788. }
  789. static int
  790. devlink_nl_info_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
  791. struct netlink_callback *cb, int flags)
  792. {
  793. int err;
  794. err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
  795. NETLINK_CB(cb->skb).portid,
  796. cb->nlh->nlmsg_seq, flags,
  797. cb->extack);
  798. if (err == -EOPNOTSUPP)
  799. err = 0;
  800. return err;
  801. }
  802. int devlink_nl_info_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
  803. {
  804. return devlink_nl_dumpit(msg, cb, devlink_nl_info_get_dump_one);
  805. }
  806. static int devlink_nl_flash_update_fill(struct sk_buff *msg,
  807. struct devlink *devlink,
  808. enum devlink_command cmd,
  809. struct devlink_flash_notify *params)
  810. {
  811. void *hdr;
  812. hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd);
  813. if (!hdr)
  814. return -EMSGSIZE;
  815. if (devlink_nl_put_handle(msg, devlink))
  816. goto nla_put_failure;
  817. if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS)
  818. goto out;
  819. if (params->status_msg &&
  820. nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG,
  821. params->status_msg))
  822. goto nla_put_failure;
  823. if (params->component &&
  824. nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
  825. params->component))
  826. goto nla_put_failure;
  827. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
  828. params->done, DEVLINK_ATTR_PAD))
  829. goto nla_put_failure;
  830. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
  831. params->total, DEVLINK_ATTR_PAD))
  832. goto nla_put_failure;
  833. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
  834. params->timeout, DEVLINK_ATTR_PAD))
  835. goto nla_put_failure;
  836. out:
  837. genlmsg_end(msg, hdr);
  838. return 0;
  839. nla_put_failure:
  840. genlmsg_cancel(msg, hdr);
  841. return -EMSGSIZE;
  842. }
  843. static void __devlink_flash_update_notify(struct devlink *devlink,
  844. enum devlink_command cmd,
  845. struct devlink_flash_notify *params)
  846. {
  847. struct sk_buff *msg;
  848. int err;
  849. WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
  850. cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
  851. cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
  852. if (!devl_is_registered(devlink) || !devlink_nl_notify_need(devlink))
  853. return;
  854. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  855. if (!msg)
  856. return;
  857. err = devlink_nl_flash_update_fill(msg, devlink, cmd, params);
  858. if (err)
  859. goto out_free_msg;
  860. devlink_nl_notify_send(devlink, msg);
  861. return;
  862. out_free_msg:
  863. nlmsg_free(msg);
  864. }
  865. static void devlink_flash_update_begin_notify(struct devlink *devlink)
  866. {
  867. struct devlink_flash_notify params = {};
  868. __devlink_flash_update_notify(devlink,
  869. DEVLINK_CMD_FLASH_UPDATE,
  870. &params);
  871. }
  872. static void devlink_flash_update_end_notify(struct devlink *devlink)
  873. {
  874. struct devlink_flash_notify params = {};
  875. __devlink_flash_update_notify(devlink,
  876. DEVLINK_CMD_FLASH_UPDATE_END,
  877. &params);
  878. }
  879. void devlink_flash_update_status_notify(struct devlink *devlink,
  880. const char *status_msg,
  881. const char *component,
  882. unsigned long done,
  883. unsigned long total)
  884. {
  885. struct devlink_flash_notify params = {
  886. .status_msg = status_msg,
  887. .component = component,
  888. .done = done,
  889. .total = total,
  890. };
  891. __devlink_flash_update_notify(devlink,
  892. DEVLINK_CMD_FLASH_UPDATE_STATUS,
  893. &params);
  894. }
  895. EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify);
  896. void devlink_flash_update_timeout_notify(struct devlink *devlink,
  897. const char *status_msg,
  898. const char *component,
  899. unsigned long timeout)
  900. {
  901. struct devlink_flash_notify params = {
  902. .status_msg = status_msg,
  903. .component = component,
  904. .timeout = timeout,
  905. };
  906. __devlink_flash_update_notify(devlink,
  907. DEVLINK_CMD_FLASH_UPDATE_STATUS,
  908. &params);
  909. }
  910. EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
  911. struct devlink_flash_component_lookup_ctx {
  912. const char *lookup_name;
  913. bool lookup_name_found;
  914. };
  915. static void
  916. devlink_flash_component_lookup_cb(const char *version_name,
  917. enum devlink_info_version_type version_type,
  918. void *version_cb_priv)
  919. {
  920. struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv;
  921. if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT ||
  922. lookup_ctx->lookup_name_found)
  923. return;
  924. lookup_ctx->lookup_name_found =
  925. !strcmp(lookup_ctx->lookup_name, version_name);
  926. }
  927. static int devlink_flash_component_get(struct devlink *devlink,
  928. struct nlattr *nla_component,
  929. const char **p_component,
  930. struct netlink_ext_ack *extack)
  931. {
  932. struct devlink_flash_component_lookup_ctx lookup_ctx = {};
  933. struct devlink_info_req req = {};
  934. const char *component;
  935. int ret;
  936. if (!nla_component)
  937. return 0;
  938. component = nla_data(nla_component);
  939. if (!devlink->ops->info_get) {
  940. NL_SET_ERR_MSG_ATTR(extack, nla_component,
  941. "component update is not supported by this device");
  942. return -EOPNOTSUPP;
  943. }
  944. lookup_ctx.lookup_name = component;
  945. req.version_cb = devlink_flash_component_lookup_cb;
  946. req.version_cb_priv = &lookup_ctx;
  947. ret = devlink->ops->info_get(devlink, &req, NULL);
  948. if (ret)
  949. return ret;
  950. if (!lookup_ctx.lookup_name_found) {
  951. NL_SET_ERR_MSG_ATTR(extack, nla_component,
  952. "selected component is not supported by this device");
  953. return -EINVAL;
  954. }
  955. *p_component = component;
  956. return 0;
  957. }
  958. int devlink_nl_flash_update_doit(struct sk_buff *skb, struct genl_info *info)
  959. {
  960. struct nlattr *nla_overwrite_mask, *nla_file_name;
  961. struct devlink_flash_update_params params = {};
  962. struct devlink *devlink = info->user_ptr[0];
  963. const char *file_name;
  964. u32 supported_params;
  965. int ret;
  966. if (!devlink->ops->flash_update)
  967. return -EOPNOTSUPP;
  968. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME))
  969. return -EINVAL;
  970. ret = devlink_flash_component_get(devlink,
  971. info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT],
  972. &params.component, info->extack);
  973. if (ret)
  974. return ret;
  975. supported_params = devlink->ops->supported_flash_update_params;
  976. nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK];
  977. if (nla_overwrite_mask) {
  978. struct nla_bitfield32 sections;
  979. if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) {
  980. NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask,
  981. "overwrite settings are not supported by this device");
  982. return -EOPNOTSUPP;
  983. }
  984. sections = nla_get_bitfield32(nla_overwrite_mask);
  985. params.overwrite_mask = sections.value & sections.selector;
  986. }
  987. nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME];
  988. file_name = nla_data(nla_file_name);
  989. ret = request_firmware(&params.fw, file_name, devlink->dev);
  990. if (ret) {
  991. NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name,
  992. "failed to locate the requested firmware file");
  993. return ret;
  994. }
  995. devlink_flash_update_begin_notify(devlink);
  996. ret = devlink->ops->flash_update(devlink, &params, info->extack);
  997. devlink_flash_update_end_notify(devlink);
  998. release_firmware(params.fw);
  999. return ret;
  1000. }
  1001. static void __devlink_compat_running_version(struct devlink *devlink,
  1002. char *buf, size_t len)
  1003. {
  1004. struct devlink_info_req req = {};
  1005. const struct nlattr *nlattr;
  1006. struct sk_buff *msg;
  1007. int rem, err;
  1008. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1009. if (!msg)
  1010. return;
  1011. req.msg = msg;
  1012. err = devlink->ops->info_get(devlink, &req, NULL);
  1013. if (err)
  1014. goto free_msg;
  1015. nla_for_each_attr_type(nlattr, DEVLINK_ATTR_INFO_VERSION_RUNNING,
  1016. (void *)msg->data, msg->len, rem) {
  1017. const struct nlattr *kv;
  1018. int rem_kv;
  1019. nla_for_each_nested_type(kv, DEVLINK_ATTR_INFO_VERSION_VALUE,
  1020. nlattr, rem_kv) {
  1021. strlcat(buf, nla_data(kv), len);
  1022. strlcat(buf, " ", len);
  1023. }
  1024. }
  1025. free_msg:
  1026. nlmsg_consume(msg);
  1027. }
  1028. void devlink_compat_running_version(struct devlink *devlink,
  1029. char *buf, size_t len)
  1030. {
  1031. if (!devlink->ops->info_get)
  1032. return;
  1033. devl_lock(devlink);
  1034. if (devl_is_registered(devlink))
  1035. __devlink_compat_running_version(devlink, buf, len);
  1036. devl_unlock(devlink);
  1037. }
  1038. int devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
  1039. {
  1040. struct devlink_flash_update_params params = {};
  1041. int ret;
  1042. devl_lock(devlink);
  1043. if (!devl_is_registered(devlink)) {
  1044. ret = -ENODEV;
  1045. goto out_unlock;
  1046. }
  1047. if (!devlink->ops->flash_update) {
  1048. ret = -EOPNOTSUPP;
  1049. goto out_unlock;
  1050. }
  1051. ret = request_firmware(&params.fw, file_name, devlink->dev);
  1052. if (ret)
  1053. goto out_unlock;
  1054. devlink_flash_update_begin_notify(devlink);
  1055. ret = devlink->ops->flash_update(devlink, &params, NULL);
  1056. devlink_flash_update_end_notify(devlink);
  1057. release_firmware(params.fw);
  1058. out_unlock:
  1059. devl_unlock(devlink);
  1060. return ret;
  1061. }
  1062. static int
  1063. devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink,
  1064. u32 portid, u32 seq, int flags,
  1065. struct netlink_ext_ack *extack)
  1066. {
  1067. struct nlattr *selftests;
  1068. void *hdr;
  1069. int err;
  1070. int i;
  1071. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags,
  1072. DEVLINK_CMD_SELFTESTS_GET);
  1073. if (!hdr)
  1074. return -EMSGSIZE;
  1075. err = -EMSGSIZE;
  1076. if (devlink_nl_put_handle(msg, devlink))
  1077. goto err_cancel_msg;
  1078. selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
  1079. if (!selftests)
  1080. goto err_cancel_msg;
  1081. for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
  1082. i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
  1083. if (devlink->ops->selftest_check(devlink, i, extack)) {
  1084. err = nla_put_flag(msg, i);
  1085. if (err)
  1086. goto err_cancel_msg;
  1087. }
  1088. }
  1089. nla_nest_end(msg, selftests);
  1090. genlmsg_end(msg, hdr);
  1091. return 0;
  1092. err_cancel_msg:
  1093. genlmsg_cancel(msg, hdr);
  1094. return err;
  1095. }
  1096. int devlink_nl_selftests_get_doit(struct sk_buff *skb, struct genl_info *info)
  1097. {
  1098. struct devlink *devlink = info->user_ptr[0];
  1099. struct sk_buff *msg;
  1100. int err;
  1101. if (!devlink->ops->selftest_check)
  1102. return -EOPNOTSUPP;
  1103. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1104. if (!msg)
  1105. return -ENOMEM;
  1106. err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid,
  1107. info->snd_seq, 0, info->extack);
  1108. if (err) {
  1109. nlmsg_free(msg);
  1110. return err;
  1111. }
  1112. return genlmsg_reply(msg, info);
  1113. }
  1114. static int devlink_nl_selftests_get_dump_one(struct sk_buff *msg,
  1115. struct devlink *devlink,
  1116. struct netlink_callback *cb,
  1117. int flags)
  1118. {
  1119. if (!devlink->ops->selftest_check)
  1120. return 0;
  1121. return devlink_nl_selftests_fill(msg, devlink,
  1122. NETLINK_CB(cb->skb).portid,
  1123. cb->nlh->nlmsg_seq, flags,
  1124. cb->extack);
  1125. }
  1126. int devlink_nl_selftests_get_dumpit(struct sk_buff *skb,
  1127. struct netlink_callback *cb)
  1128. {
  1129. return devlink_nl_dumpit(skb, cb, devlink_nl_selftests_get_dump_one);
  1130. }
  1131. static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
  1132. enum devlink_selftest_status test_status)
  1133. {
  1134. struct nlattr *result_attr;
  1135. result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT);
  1136. if (!result_attr)
  1137. return -EMSGSIZE;
  1138. if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) ||
  1139. nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS,
  1140. test_status))
  1141. goto nla_put_failure;
  1142. nla_nest_end(skb, result_attr);
  1143. return 0;
  1144. nla_put_failure:
  1145. nla_nest_cancel(skb, result_attr);
  1146. return -EMSGSIZE;
  1147. }
  1148. static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
  1149. [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
  1150. };
  1151. int devlink_nl_selftests_run_doit(struct sk_buff *skb, struct genl_info *info)
  1152. {
  1153. struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
  1154. struct devlink *devlink = info->user_ptr[0];
  1155. struct nlattr *attrs, *selftests;
  1156. struct sk_buff *msg;
  1157. void *hdr;
  1158. int err;
  1159. int i;
  1160. if (!devlink->ops->selftest_run || !devlink->ops->selftest_check)
  1161. return -EOPNOTSUPP;
  1162. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS))
  1163. return -EINVAL;
  1164. attrs = info->attrs[DEVLINK_ATTR_SELFTESTS];
  1165. err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs,
  1166. devlink_selftest_nl_policy, info->extack);
  1167. if (err < 0)
  1168. return err;
  1169. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1170. if (!msg)
  1171. return -ENOMEM;
  1172. err = -EMSGSIZE;
  1173. hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
  1174. &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN);
  1175. if (!hdr)
  1176. goto free_msg;
  1177. if (devlink_nl_put_handle(msg, devlink))
  1178. goto genlmsg_cancel;
  1179. selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
  1180. if (!selftests)
  1181. goto genlmsg_cancel;
  1182. for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
  1183. i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
  1184. enum devlink_selftest_status test_status;
  1185. if (nla_get_flag(tb[i])) {
  1186. if (!devlink->ops->selftest_check(devlink, i,
  1187. info->extack)) {
  1188. if (devlink_selftest_result_put(msg, i,
  1189. DEVLINK_SELFTEST_STATUS_SKIP))
  1190. goto selftests_nest_cancel;
  1191. continue;
  1192. }
  1193. test_status = devlink->ops->selftest_run(devlink, i,
  1194. info->extack);
  1195. if (devlink_selftest_result_put(msg, i, test_status))
  1196. goto selftests_nest_cancel;
  1197. }
  1198. }
  1199. nla_nest_end(msg, selftests);
  1200. genlmsg_end(msg, hdr);
  1201. return genlmsg_reply(msg, info);
  1202. selftests_nest_cancel:
  1203. nla_nest_cancel(msg, selftests);
  1204. genlmsg_cancel:
  1205. genlmsg_cancel(msg, hdr);
  1206. free_msg:
  1207. nlmsg_free(msg);
  1208. return err;
  1209. }