genetlink.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997
// SPDX-License-Identifier: GPL-2.0
/*
 * NETLINK	Generic Netlink Family
 *
 * Authors:	Jamal Hadi Salim
 *		Thomas Graf <tgraf@suug.ch>
 *		Johannes Berg <johannes@sipsolutions.net>
 */
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/slab.h>
  12. #include <linux/errno.h>
  13. #include <linux/types.h>
  14. #include <linux/socket.h>
  15. #include <linux/string_helpers.h>
  16. #include <linux/skbuff.h>
  17. #include <linux/mutex.h>
  18. #include <linux/bitmap.h>
  19. #include <linux/rwsem.h>
  20. #include <linux/idr.h>
  21. #include <net/sock.h>
  22. #include <net/genetlink.h>
  23. #include "genetlink.h"
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
/* taken for write while the family registry (genl_fam_idr) is modified,
 * for read while messages are dispatched to families
 */
static DECLARE_RWSEM(cb_lock);

/* number of sockets currently inside a genl destructor; family
 * unregistration waits on the queue below until this reaches zero
 */
atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
/* Take the global genetlink mutex (used for non-parallel_ops families). */
void genl_lock(void)
{
	mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);
/* Release the global genetlink mutex. */
void genl_unlock(void)
{
	mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);
/* Exclude all message processing: cb_lock writer side, then genl_mutex.
 * Lock order matters — genl_unlock_all() must release in reverse.
 */
static void genl_lock_all(void)
{
	down_write(&cb_lock);
	genl_lock();
}
/* Undo genl_lock_all() in reverse order: genl_mutex first, then cb_lock. */
static void genl_unlock_all(void)
{
	genl_unlock();
	up_write(&cb_lock);
}
/* Serialize op dispatch for families that did not opt into parallel_ops. */
static void genl_op_lock(const struct genl_family *family)
{
	if (!family->parallel_ops)
		genl_lock();
}
/* Counterpart of genl_op_lock() for non-parallel_ops families. */
static void genl_op_unlock(const struct genl_family *family)
{
	if (!family->parallel_ops)
		genl_unlock();
}
/* Registry of all genetlink families, indexed by family ID. */
static DEFINE_IDR(genl_fam_idr);

/*
 * Bitmap of multicast groups that are currently in use.
 *
 * To avoid an allocation at boot of just one unsigned long,
 * declare it global instead.
 * Bit 0 is marked as already used since group 0 is invalid.
 * Bit 1 is marked as already used since the drop-monitor code
 * abuses the API and thinks it can statically use group 1.
 * That group will typically conflict with other groups that
 * any proper users use.
 * Bit 16 is marked as used since it's used for generic netlink
 * and the code no longer marks pre-reserved IDs as used.
 * Bit 17 is marked as already used since the VFS quota code
 * also abused this API and relied on family == group ID, we
 * cater to that by giving it a static family and group ID.
 * Bit 18 is marked as already used since the PMCRAID driver
 * did the same thing as the VFS quota code (maybe copied?)
 */
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
				      BIT(GENL_ID_VFS_DQUOT) |
				      BIT(GENL_ID_PMCRAID);
/* points at mc_group_start until the bitmap has to grow (see
 * genl_allocate_reserve_groups())
 */
static unsigned long *mc_groups = &mc_group_start;
/* current bitmap length, in longs */
static unsigned long mc_groups_longs = 1;
  82. /* We need the last attribute with non-zero ID therefore a 2-entry array */
  83. static struct nla_policy genl_policy_reject_all[] = {
  84. { .type = NLA_REJECT },
  85. { .type = NLA_REJECT },
  86. };
/* Forward declaration: notifies nlctrl listeners of family/group changes;
 * defined later in this file.
 */
static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);
  90. static void
  91. genl_op_fill_in_reject_policy(const struct genl_family *family,
  92. struct genl_ops *op)
  93. {
  94. BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);
  95. if (op->policy || op->cmd < family->resv_start_op)
  96. return;
  97. op->policy = genl_policy_reject_all;
  98. op->maxattr = 1;
  99. }
  100. static void
  101. genl_op_fill_in_reject_policy_split(const struct genl_family *family,
  102. struct genl_split_ops *op)
  103. {
  104. if (op->policy)
  105. return;
  106. op->policy = genl_policy_reject_all;
  107. op->maxattr = 1;
  108. }
/* Look up a registered family by numeric ID; NULL if not registered. */
static const struct genl_family *genl_family_find_byid(unsigned int id)
{
	return idr_find(&genl_fam_idr, id);
}
  113. static const struct genl_family *genl_family_find_byname(char *name)
  114. {
  115. const struct genl_family *family;
  116. unsigned int id;
  117. idr_for_each_entry(&genl_fam_idr, family, id)
  118. if (strcmp(family->name, name) == 0)
  119. return family;
  120. return NULL;
  121. }
/*
 * Iterator state for walking every command of a family across all
 * three op tables (ops, small_ops, split_ops) in one pass.
 */
struct genl_op_iter {
	const struct genl_family *family;
	struct genl_split_ops doit;	/* current cmd's DO op, zeroed if absent */
	struct genl_split_ops dumpit;	/* current cmd's DUMP op, zeroed if absent */
	int cmd_idx;			/* commands yielded so far */
	int entry_idx;			/* raw index across the concatenated tables */
	u32 cmd;			/* current command number */
	u8 flags;			/* doit.flags | dumpit.flags of current cmd */
};
  131. static void genl_op_from_full(const struct genl_family *family,
  132. unsigned int i, struct genl_ops *op)
  133. {
  134. *op = family->ops[i];
  135. if (!op->maxattr)
  136. op->maxattr = family->maxattr;
  137. if (!op->policy)
  138. op->policy = family->policy;
  139. genl_op_fill_in_reject_policy(family, op);
  140. }
  141. static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
  142. struct genl_ops *op)
  143. {
  144. int i;
  145. for (i = 0; i < family->n_ops; i++)
  146. if (family->ops[i].cmd == cmd) {
  147. genl_op_from_full(family, i, op);
  148. return 0;
  149. }
  150. return -ENOENT;
  151. }
  152. static void genl_op_from_small(const struct genl_family *family,
  153. unsigned int i, struct genl_ops *op)
  154. {
  155. memset(op, 0, sizeof(*op));
  156. op->doit = family->small_ops[i].doit;
  157. op->dumpit = family->small_ops[i].dumpit;
  158. op->cmd = family->small_ops[i].cmd;
  159. op->internal_flags = family->small_ops[i].internal_flags;
  160. op->flags = family->small_ops[i].flags;
  161. op->validate = family->small_ops[i].validate;
  162. op->maxattr = family->maxattr;
  163. op->policy = family->policy;
  164. genl_op_fill_in_reject_policy(family, op);
  165. }
  166. static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
  167. struct genl_ops *op)
  168. {
  169. int i;
  170. for (i = 0; i < family->n_small_ops; i++)
  171. if (family->small_ops[i].cmd == cmd) {
  172. genl_op_from_small(family, i, op);
  173. return 0;
  174. }
  175. return -ENOENT;
  176. }
/*
 * Load the split op(s) at the iterator's current entry index into
 * iter->doit / iter->dumpit.  A command occupies either one DO entry,
 * one DUMP entry, or a DO entry immediately followed by a DUMP entry
 * for the same cmd (sort order enforced by genl_validate_ops());
 * entry_idx advances by the number of table entries consumed.
 */
static void genl_op_from_split(struct genl_op_iter *iter)
{
	const struct genl_family *family = iter->family;
	int i, cnt = 0;

	/* translate the global entry index into a split_ops[] index */
	i = iter->entry_idx - family->n_ops - family->n_small_ops;

	if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
		iter->doit = family->split_ops[i + cnt];
		genl_op_fill_in_reject_policy_split(family, &iter->doit);
		cnt++;
	} else {
		memset(&iter->doit, 0, sizeof(iter->doit));
	}

	/* a DUMP entry pairs with a preceding DO only for the same cmd */
	if (i + cnt < family->n_split_ops &&
	    family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP &&
	    (!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
		iter->dumpit = family->split_ops[i + cnt];
		genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
		cnt++;
	} else {
		memset(&iter->dumpit, 0, sizeof(iter->dumpit));
	}

	/* every entry must have DO and/or DUMP (validated at registration) */
	WARN_ON(!cnt);

	iter->entry_idx += cnt;
}
  201. static int
  202. genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
  203. struct genl_split_ops *op)
  204. {
  205. int i;
  206. for (i = 0; i < family->n_split_ops; i++)
  207. if (family->split_ops[i].cmd == cmd &&
  208. family->split_ops[i].flags & flag) {
  209. *op = family->split_ops[i];
  210. return 0;
  211. }
  212. return -ENOENT;
  213. }
/*
 * Convert a legacy (full/small) op into split form for one capability.
 * @flags selects GENL_CMD_CAP_DO or GENL_CMD_CAP_DUMP; if the legacy
 * op lacks the corresponding handler, @op is zeroed and -ENOENT is
 * returned (callers rely on the zeroing).
 */
static int
genl_cmd_full_to_split(struct genl_split_ops *op,
		       const struct genl_family *family,
		       const struct genl_ops *full, u8 flags)
{
	if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
	    (flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
		memset(op, 0, sizeof(*op));
		return -ENOENT;
	}

	if (flags & GENL_CMD_CAP_DUMP) {
		op->start = full->start;
		op->dumpit = full->dumpit;
		op->done = full->done;
	} else {
		/* pre/post hooks come from the family, not the op */
		op->pre_doit = family->pre_doit;
		op->doit = full->doit;
		op->post_doit = family->post_doit;
	}

	/* dumps may opt out of attribute validation entirely */
	if (flags & GENL_CMD_CAP_DUMP &&
	    full->validate & GENL_DONT_VALIDATE_DUMP) {
		op->policy = NULL;
		op->maxattr = 0;
	} else {
		op->policy = full->policy;
		op->maxattr = full->maxattr;
	}

	op->cmd = full->cmd;
	op->internal_flags = full->internal_flags;
	op->flags = full->flags;
	op->validate = full->validate;

	/* Make sure flags include the GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
	op->flags |= flags;

	return 0;
}
/* Must make sure that op is initialized to 0 on failure.
 * Searches the legacy (full, then small) tables first and converts a
 * match to split form; falls back to the native split-ops table.
 */
static int
genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
	     struct genl_split_ops *op)
{
	struct genl_ops full;
	int err;

	err = genl_get_cmd_full(cmd, family, &full);
	if (err == -ENOENT)
		err = genl_get_cmd_small(cmd, family, &full);
	/* Found one of legacy forms */
	if (err == 0)
		return genl_cmd_full_to_split(op, family, &full, flags);

	err = genl_get_cmd_split(cmd, flags, family, op);
	if (err)
		memset(op, 0, sizeof(*op));
	return err;
}
  267. /* For policy dumping only, get ops of both do and dump.
  268. * Fail if both are missing, genl_get_cmd() will zero-init in case of failure.
  269. */
  270. static int
  271. genl_get_cmd_both(u32 cmd, const struct genl_family *family,
  272. struct genl_split_ops *doit, struct genl_split_ops *dumpit)
  273. {
  274. int err1, err2;
  275. err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
  276. err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);
  277. return err1 && err2 ? -ENOENT : 0;
  278. }
/*
 * Prime @iter for walking @family's commands.  Returns true when the
 * family has at least one op of any kind (the integer sum of the three
 * table sizes converts to bool).
 */
static bool
genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
{
	iter->family = family;
	iter->cmd_idx = 0;
	iter->entry_idx = 0;

	iter->flags = 0;

	return iter->family->n_ops +
	       iter->family->n_small_ops +
	       iter->family->n_split_ops;
}
/*
 * Advance @iter to the next command.  The three tables are walked in
 * order (full, small, split); legacy ops are converted to split form so
 * callers always consume iter->doit / iter->dumpit.  Returns false when
 * all tables are exhausted.
 */
static bool genl_op_iter_next(struct genl_op_iter *iter)
{
	const struct genl_family *family = iter->family;
	bool legacy_op = true;
	struct genl_ops op;

	if (iter->entry_idx < family->n_ops) {
		genl_op_from_full(family, iter->entry_idx, &op);
	} else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
		genl_op_from_small(family, iter->entry_idx - family->n_ops,
				   &op);
	} else if (iter->entry_idx <
		   family->n_ops + family->n_small_ops + family->n_split_ops) {
		legacy_op = false;
		/* updates entry_idx */
		genl_op_from_split(iter);
	} else {
		return false;
	}

	iter->cmd_idx++;

	if (legacy_op) {
		iter->entry_idx++;

		genl_cmd_full_to_split(&iter->doit, family,
				       &op, GENL_CMD_CAP_DO);
		genl_cmd_full_to_split(&iter->dumpit, family,
				       &op, GENL_CMD_CAP_DUMP);
	}

	/* either half may be zeroed; OR merges whichever halves exist */
	iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
	iter->flags = iter->doit.flags | iter->dumpit.flags;

	return true;
}
/* Snapshot iterator state (used for the nested duplicate-cmd scan). */
static void
genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
{
	*dst = *src;
}
/* Number of commands yielded so far (0-based index of the next one). */
static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
{
	return iter->cmd_idx;
}
/*
 * Reserve a run of @n_groups consecutive free multicast group IDs,
 * growing the mc_groups bitmap when no run fits, and return the first
 * ID via @first_id.  Returns 0 on success or -ENOMEM.
 */
static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		/* find a candidate starting bit at/after 'start' */
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		/* verify the whole run (within current bitmap) is free */
		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		/* run extends past the bitmap end: grow it and retry */
		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				/* first growth: move off the static word */
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				/* krealloc doesn't zero the new tail */
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}
/* The controller family (nlctrl), defined later in this file. */
static struct genl_family genl_ctrl;
/*
 * Validate @family's multicast group names, reserve group IDs for them
 * (honouring the historic fixed-ID families), record the base ID in
 * family->mcgrp_offset and grow the group bitmaps of the genl sockets.
 */
static int genl_validate_assign_mc_groups(struct genl_family *family)
{
	int first_id;
	int n_groups = family->n_mcgrps;
	int err = 0, i;
	bool groups_allocated = false;

	if (!n_groups)
		return 0;

	/* names must be non-empty and NUL-terminated within GENL_NAMSIZ */
	for (i = 0; i < n_groups; i++) {
		const struct genl_multicast_group *grp = &family->mcgrps[i];

		if (WARN_ON(grp->name[0] == '\0'))
			return -EINVAL;
		if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
			return -EINVAL;
	}

	/* special-case our own group and hacks */
	if (family == &genl_ctrl) {
		first_id = GENL_ID_CTRL;
		BUG_ON(n_groups != 1);
	} else if (strcmp(family->name, "NET_DM") == 0) {
		first_id = 1;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_VFS_DQUOT) {
		first_id = GENL_ID_VFS_DQUOT;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_PMCRAID) {
		first_id = GENL_ID_PMCRAID;
		BUG_ON(n_groups != 1);
	} else {
		groups_allocated = true;
		err = genl_allocate_reserve_groups(n_groups, &first_id);
		if (err)
			return err;
	}

	family->mcgrp_offset = first_id;

	/* if still initializing, can't and don't need to realloc bitmaps */
	if (!init_net.genl_sock)
		return 0;

	if (family->netnsok) {
		/* netns-aware family: resize the genl socket in every netns */
		struct net *net;

		netlink_table_grab();
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = __netlink_change_ngroups(net->genl_sock,
					mc_groups_longs * BITS_PER_LONG);
			if (err) {
				/*
				 * No need to roll back, can only fail if
				 * memory allocation fails and then the
				 * number of _possible_ groups has been
				 * increased on some sockets which is ok.
				 */
				break;
			}
		}
		rcu_read_unlock();
		netlink_table_ungrab();
	} else {
		err = netlink_change_ngroups(init_net.genl_sock,
					     mc_groups_longs * BITS_PER_LONG);
	}

	/* release dynamically reserved IDs if the resize failed */
	if (groups_allocated && err) {
		for (i = 0; i < family->n_mcgrps; i++)
			clear_bit(family->mcgrp_offset + i, mc_groups);
	}

	return err;
}
/*
 * Drop all subscribers of @family's groups in every netns, free the
 * group IDs (except the statically-abused ID 1, see mc_groups comment)
 * and notify userspace of each removed group.
 */
static void genl_unregister_mc_groups(const struct genl_family *family)
{
	struct net *net;
	int i;

	netlink_table_grab();
	rcu_read_lock();
	for_each_net_rcu(net) {
		for (i = 0; i < family->n_mcgrps; i++)
			__netlink_clear_multicast_users(
				net->genl_sock, family->mcgrp_offset + i);
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	for (i = 0; i < family->n_mcgrps; i++) {
		int grp_id = family->mcgrp_offset + i;

		/* group 1 is never returned to the bitmap */
		if (grp_id != 1)
			clear_bit(grp_id, mc_groups);
		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
				&family->mcgrps[i], grp_id);
	}
}
  472. static bool genl_split_op_check(const struct genl_split_ops *op)
  473. {
  474. if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
  475. GENL_CMD_CAP_DUMP)) != 1))
  476. return true;
  477. return false;
  478. }
/*
 * Sanity-check a family's op tables at registration time: table
 * pointers present when counts are non-zero, every command has a DO
 * and/or DUMP handler, commands are unique, reserved-range commands
 * carry no validate flags, and split_ops is sorted by cmd with any
 * DO/DUMP pair ordered DO first.
 */
static int genl_validate_ops(const struct genl_family *family)
{
	struct genl_op_iter i, j;
	unsigned int s;

	if (WARN_ON(family->n_ops && !family->ops) ||
	    WARN_ON(family->n_small_ops && !family->small_ops) ||
	    WARN_ON(family->n_split_ops && !family->split_ops))
		return -EINVAL;

	for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
		if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
			return -EINVAL;

		if (WARN_ON(i.cmd >= family->resv_start_op &&
			    (i.doit.validate || i.dumpit.validate)))
			return -EINVAL;

		/* O(n^2) duplicate-command scan over the remaining ops */
		genl_op_iter_copy(&j, &i);
		while (genl_op_iter_next(&j)) {
			if (i.cmd == j.cmd)
				return -EINVAL;
		}
	}

	if (family->n_split_ops) {
		if (genl_split_op_check(&family->split_ops[0]))
			return -EINVAL;
	}

	/* pairwise checks over consecutive split_ops entries */
	for (s = 1; s < family->n_split_ops; s++) {
		const struct genl_split_ops *a, *b;

		a = &family->split_ops[s - 1];
		b = &family->split_ops[s];

		if (genl_split_op_check(b))
			return -EINVAL;

		/* Check sort order */
		if (a->cmd < b->cmd) {
			continue;
		} else if (a->cmd > b->cmd) {
			WARN_ON(1);
			return -EINVAL;
		}

		/* same cmd twice: flags may differ only in DO/DUMP caps */
		if (a->internal_flags != b->internal_flags ||
		    ((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
					       GENL_CMD_CAP_DUMP))) {
			WARN_ON(1);
			return -EINVAL;
		}

		/* ...and must be the DO entry followed by the DUMP entry */
		if ((a->flags & GENL_CMD_CAP_DO) &&
		    (b->flags & GENL_CMD_CAP_DUMP))
			continue;

		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
/*
 * Allocate and (optionally) initialize one per-socket private area of
 * family->sock_priv_size bytes.  Returns the zeroed buffer or
 * ERR_PTR(-ENOMEM).
 */
static void *genl_sk_priv_alloc(struct genl_family *family)
{
	void *priv;

	priv = kzalloc(family->sock_priv_size, GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	if (family->sock_priv_init)
		family->sock_priv_init(priv);

	return priv;
}
/* Run the family's destructor hook (if any) and free one priv area. */
static void genl_sk_priv_free(const struct genl_family *family, void *priv)
{
	if (family->sock_priv_destroy)
		family->sock_priv_destroy(priv);
	kfree(priv);
}
/*
 * Set up the per-family xarray mapping sockets to their private areas.
 * A no-op for families that declare no sock_priv_size.
 */
static int genl_sk_privs_alloc(struct genl_family *family)
{
	if (!family->sock_priv_size)
		return 0;

	family->sock_privs = kzalloc(sizeof(*family->sock_privs), GFP_KERNEL);
	if (!family->sock_privs)
		return -ENOMEM;
	xa_init(family->sock_privs);
	return 0;
}
/* Free every remaining per-socket priv area and the xarray itself. */
static void genl_sk_privs_free(const struct genl_family *family)
{
	unsigned long id;
	void *priv;

	if (!family->sock_priv_size)
		return;

	xa_for_each(family->sock_privs, id, priv)
		genl_sk_priv_free(family, priv);

	xa_destroy(family->sock_privs);
	kfree(family->sock_privs);
}
  567. static void genl_sk_priv_free_by_sock(struct genl_family *family,
  568. struct sock *sk)
  569. {
  570. void *priv;
  571. if (!family->sock_priv_size)
  572. return;
  573. priv = xa_erase(family->sock_privs, (unsigned long) sk);
  574. if (!priv)
  575. return;
  576. genl_sk_priv_free(family, priv);
  577. }
/*
 * Netlink release callback: a genl socket is going away, so drop any
 * per-family private state attached to it.  Holding cb_lock for read
 * keeps the family registry stable during the walk.
 */
static void genl_release(struct sock *sk, unsigned long *groups)
{
	struct genl_family *family;
	unsigned int id;

	down_read(&cb_lock);

	idr_for_each_entry(&genl_fam_idr, family, id)
		genl_sk_priv_free_by_sock(family, sk);

	up_read(&cb_lock);
}
/**
 * __genl_sk_priv_get - Get family private pointer for socket, if exists
 *
 * @family: family
 * @sk: socket
 *
 * Lookup a private memory for a Generic netlink family and specified socket.
 *
 * Caller should make sure this is called in RCU read locked section.
 *
 * Return: valid pointer on success, otherwise negative error value
 * encoded by ERR_PTR(), NULL in case priv does not exist.
 */
void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk)
{
	/* only valid for families that declared sock_priv_size */
	if (WARN_ON_ONCE(!family->sock_privs))
		return ERR_PTR(-EINVAL);

	return xa_load(family->sock_privs, (unsigned long) sk);
}
/**
 * genl_sk_priv_get - Get family private pointer for socket
 *
 * @family: family
 * @sk: socket
 *
 * Lookup a private memory for a Generic netlink family and specified socket.
 * Allocate the private memory in case it was not already done.
 *
 * Return: valid pointer on success, otherwise negative error value
 * encoded by ERR_PTR().
 */
void *genl_sk_priv_get(struct genl_family *family, struct sock *sk)
{
	void *priv, *old_priv;

	priv = __genl_sk_priv_get(family, sk);
	if (priv)
		return priv;

	/* priv for the family does not exist so far, create it. */
	priv = genl_sk_priv_alloc(family);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	/* lock-free insert: only installs when no entry exists yet */
	old_priv = xa_cmpxchg(family->sock_privs, (unsigned long) sk, NULL,
			      priv, GFP_KERNEL);
	if (old_priv) {
		genl_sk_priv_free(family, priv);
		if (xa_is_err(old_priv))
			return ERR_PTR(xa_err(old_priv));
		/* Race happened, priv for the socket was already inserted. */
		return old_priv;
	}
	return priv;
}
/**
 * genl_register_family - register a generic netlink family
 * @family: generic netlink family
 *
 * Registers the specified family after validating it first. Only one
 * family may be registered with the same family name or identifier.
 *
 * The family's ops, multicast groups and module pointer must already
 * be assigned.
 *
 * Return 0 on success or a negative error code.
 */
int genl_register_family(struct genl_family *family)
{
	int err, i;
	int start = GENL_START_ALLOC, end = GENL_MAX_ID;

	err = genl_validate_ops(family);
	if (err)
		return err;

	genl_lock_all();

	if (genl_family_find_byname(family->name)) {
		err = -EEXIST;
		goto errout_locked;
	}

	err = genl_sk_privs_alloc(family);
	if (err)
		goto errout_locked;

	/*
	 * Sadly, a few cases need to be special-cased
	 * due to them having previously abused the API
	 * and having used their family ID also as their
	 * multicast group ID, so we use reserved IDs
	 * for both to be sure we can do that mapping.
	 */
	if (family == &genl_ctrl) {
		/* and this needs to be special for initial family lookups */
		start = end = GENL_ID_CTRL;
	} else if (strcmp(family->name, "pmcraid") == 0) {
		start = end = GENL_ID_PMCRAID;
	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
		start = end = GENL_ID_VFS_DQUOT;
	}

	family->id = idr_alloc_cyclic(&genl_fam_idr, family,
				      start, end + 1, GFP_KERNEL);
	if (family->id < 0) {
		err = family->id;
		goto errout_sk_privs_free;
	}

	err = genl_validate_assign_mc_groups(family);
	if (err)
		goto errout_remove;

	genl_unlock_all();

	/* send all events */
	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
	for (i = 0; i < family->n_mcgrps; i++)
		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
				&family->mcgrps[i], family->mcgrp_offset + i);

	return 0;

	/* unwind in reverse order of acquisition */
errout_remove:
	idr_remove(&genl_fam_idr, family->id);
errout_sk_privs_free:
	genl_sk_privs_free(family);
errout_locked:
	genl_unlock_all();
	return err;
}
EXPORT_SYMBOL(genl_register_family);
/**
 * genl_unregister_family - unregister generic netlink family
 * @family: generic netlink family
 *
 * Unregisters the specified family.
 *
 * Returns 0 on success or a negative error code.
 */
int genl_unregister_family(const struct genl_family *family)
{
	genl_lock_all();

	if (!genl_family_find_byid(family->id)) {
		genl_unlock_all();
		return -ENOENT;
	}

	genl_unregister_mc_groups(family);

	idr_remove(&genl_fam_idr, family->id);

	/* NOTE: cb_lock is dropped before the wait below while genl_mutex
	 * stays held — presumably so in-flight socket destructors (tracked
	 * by genl_sk_destructing_cnt) can make progress; confirm against
	 * the netlink destructor path before changing this ordering.
	 */
	up_write(&cb_lock);
	wait_event(genl_sk_destructing_waitq,
		   atomic_read(&genl_sk_destructing_cnt) == 0);

	genl_sk_privs_free(family);

	genl_unlock();

	genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);

	return 0;
}
EXPORT_SYMBOL(genl_unregister_family);
/**
 * genlmsg_put - Add generic netlink header to netlink message
 * @skb: socket buffer holding the message
 * @portid: netlink portid the message is addressed to
 * @seq: sequence number (usually the one of the sender)
 * @family: generic netlink family
 * @flags: netlink message flags
 * @cmd: generic netlink command
 *
 * Returns pointer to user specific header
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
		  const struct genl_family *family, int flags, u8 cmd)
{
	struct nlmsghdr *nlh;
	struct genlmsghdr *hdr;

	/* reserve room for the genl header plus the family's own header */
	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
			family->hdrsize, flags);
	if (nlh == NULL)
		return NULL;

	hdr = nlmsg_data(nlh);
	hdr->cmd = cmd;
	hdr->version = family->version;
	hdr->reserved = 0;

	/* caller writes its family-specific header right after ours */
	return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);
  759. static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
  760. {
  761. return kmalloc(sizeof(struct genl_dumpit_info), GFP_KERNEL);
  762. }
/* Release a dump info structure obtained from genl_dumpit_info_alloc(). */
static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
{
	kfree(info);
}
  767. static struct nlattr **
  768. genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
  769. struct nlmsghdr *nlh,
  770. struct netlink_ext_ack *extack,
  771. const struct genl_split_ops *ops,
  772. int hdrlen,
  773. enum genl_validate_flags no_strict_flag)
  774. {
  775. enum netlink_validation validate = ops->validate & no_strict_flag ?
  776. NL_VALIDATE_LIBERAL :
  777. NL_VALIDATE_STRICT;
  778. struct nlattr **attrbuf;
  779. int err;
  780. if (!ops->maxattr)
  781. return NULL;
  782. attrbuf = kmalloc_array(ops->maxattr + 1,
  783. sizeof(struct nlattr *), GFP_KERNEL);
  784. if (!attrbuf)
  785. return ERR_PTR(-ENOMEM);
  786. err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
  787. validate, extack);
  788. if (err) {
  789. kfree(attrbuf);
  790. return ERR_PTR(err);
  791. }
  792. return attrbuf;
  793. }
/* Free an attribute array returned by genl_family_rcv_msg_attrs_parse(). */
static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
{
	kfree(attrbuf);
}
/* Transient context handed to genl_start() via cb->data while a dump is
 * being set up; replaced by a genl_dumpit_info once the dump starts.
 */
struct genl_start_context {
	const struct genl_family *family;	/* family being dumped */
	struct nlmsghdr *nlh;			/* request message header */
	struct netlink_ext_ack *extack;		/* extended ack for errors */
	const struct genl_split_ops *ops;	/* resolved dump op */
	int hdrlen;				/* genl + family header length */
};
/* ->start callback for generic netlink dumps: validate the header,
 * parse attributes, allocate the dump info and run the family's own
 * ->start handler if present. On success, ownership of the info (and
 * the parsed attrs) moves to cb->data; genl_done() releases both.
 */
static int genl_start(struct netlink_callback *cb)
{
	struct genl_start_context *ctx = cb->data;
	const struct genl_split_ops *ops;
	struct genl_dumpit_info *info;
	struct nlattr **attrs = NULL;
	int rc = 0;

	ops = ctx->ops;
	/* Unless validation is explicitly disabled for this op, require
	 * the full family header to be present.
	 */
	if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
	    ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
		return -EINVAL;

	attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
						ops, ctx->hdrlen,
						GENL_DONT_VALIDATE_DUMP_STRICT);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	info = genl_dumpit_info_alloc();
	if (!info) {
		genl_family_rcv_msg_attrs_free(attrs);
		return -ENOMEM;
	}
	/* Snapshot the op and fill the genl_info seen by the dump handlers */
	info->op = *ops;
	info->info.family = ctx->family;
	info->info.snd_seq = cb->nlh->nlmsg_seq;
	info->info.snd_portid = NETLINK_CB(cb->skb).portid;
	info->info.nlhdr = cb->nlh;
	info->info.genlhdr = nlmsg_data(cb->nlh);
	info->info.attrs = attrs;
	genl_info_net_set(&info->info, sock_net(cb->skb->sk));
	info->info.extack = cb->extack;
	memset(&info->info.user_ptr, 0, sizeof(info->info.user_ptr));

	cb->data = info;
	if (ops->start) {
		/* Family start handler runs under the family's op lock */
		genl_op_lock(ctx->family);
		rc = ops->start(cb);
		genl_op_unlock(ctx->family);
	}

	if (rc) {
		/* Start failed: undo everything and clear cb->data so no
		 * later callback sees the stale info.
		 */
		genl_family_rcv_msg_attrs_free(info->info.attrs);
		genl_dumpit_info_free(info);
		cb->data = NULL;
	}

	return rc;
}
  849. static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
  850. {
  851. struct genl_dumpit_info *dump_info = cb->data;
  852. const struct genl_split_ops *ops = &dump_info->op;
  853. struct genl_info *info = &dump_info->info;
  854. int rc;
  855. info->extack = cb->extack;
  856. genl_op_lock(info->family);
  857. rc = ops->dumpit(skb, cb);
  858. genl_op_unlock(info->family);
  859. return rc;
  860. }
  861. static int genl_done(struct netlink_callback *cb)
  862. {
  863. struct genl_dumpit_info *dump_info = cb->data;
  864. const struct genl_split_ops *ops = &dump_info->op;
  865. struct genl_info *info = &dump_info->info;
  866. int rc = 0;
  867. info->extack = cb->extack;
  868. if (ops->done) {
  869. genl_op_lock(info->family);
  870. rc = ops->done(cb);
  871. genl_op_unlock(info->family);
  872. }
  873. genl_family_rcv_msg_attrs_free(info->attrs);
  874. genl_dumpit_info_free(dump_info);
  875. return rc;
  876. }
/* Kick off a netlink dump for a GENL_CMD_CAP_DUMP op. The start context
 * lives on the stack: genl_start() consumes it synchronously from
 * __netlink_dump_start() before this function returns.
 */
static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
				      struct sk_buff *skb,
				      struct nlmsghdr *nlh,
				      struct netlink_ext_ack *extack,
				      const struct genl_split_ops *ops,
				      int hdrlen, struct net *net)
{
	struct genl_start_context ctx;
	struct netlink_dump_control c = {
		.module = family->module,
		.data = &ctx,
		.start = genl_start,
		.dump = genl_dumpit,
		.done = genl_done,
		.extack = extack,
	};
	int err;

	ctx.family = family;
	ctx.nlh = nlh;
	ctx.extack = extack;
	ctx.ops = ops;
	ctx.hdrlen = hdrlen;

	/* Caller (genl_rcv_msg) holds the family op lock; drop it across
	 * __netlink_dump_start() and retake it before returning so the
	 * caller's unlock stays balanced.
	 */
	genl_op_unlock(family);
	err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
	genl_op_lock(family);

	return err;
}
/* Execute a GENL_CMD_CAP_DO op: parse attributes, build the genl_info
 * on the stack, and run pre_doit/doit/post_doit in order. post_doit is
 * only reached when pre_doit succeeded (or is absent).
 */
static int genl_family_rcv_msg_doit(const struct genl_family *family,
				    struct sk_buff *skb,
				    struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack,
				    const struct genl_split_ops *ops,
				    int hdrlen, struct net *net)
{
	struct nlattr **attrbuf;
	struct genl_info info;
	int err;

	attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
						  ops, hdrlen,
						  GENL_DONT_VALIDATE_STRICT);
	if (IS_ERR(attrbuf))
		return PTR_ERR(attrbuf);

	info.snd_seq = nlh->nlmsg_seq;
	info.snd_portid = NETLINK_CB(skb).portid;
	info.family = family;
	info.nlhdr = nlh;
	info.genlhdr = nlmsg_data(nlh);
	info.attrs = attrbuf;
	info.extack = extack;
	genl_info_net_set(&info, net);
	/* user_ptr is scratch space for pre_doit/doit cooperation */
	memset(&info.user_ptr, 0, sizeof(info.user_ptr));

	if (ops->pre_doit) {
		err = ops->pre_doit(ops, skb, &info);
		if (err)
			goto out;
	}

	err = ops->doit(skb, &info);

	if (ops->post_doit)
		ops->post_doit(ops, skb, &info);

out:
	genl_family_rcv_msg_attrs_free(attrbuf);

	return err;
}
  940. static int genl_header_check(const struct genl_family *family,
  941. struct nlmsghdr *nlh, struct genlmsghdr *hdr,
  942. struct netlink_ext_ack *extack)
  943. {
  944. u16 flags;
  945. /* Only for commands added after we started validating */
  946. if (hdr->cmd < family->resv_start_op)
  947. return 0;
  948. if (hdr->reserved) {
  949. NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
  950. return -EINVAL;
  951. }
  952. /* Old netlink flags have pretty loose semantics, allow only the flags
  953. * consumed by the core where we can enforce the meaning.
  954. */
  955. flags = nlh->nlmsg_flags;
  956. if ((flags & NLM_F_DUMP) == NLM_F_DUMP) /* DUMP is 2 bits */
  957. flags &= ~NLM_F_DUMP;
  958. if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
  959. NL_SET_ERR_MSG(extack,
  960. "ambiguous or reserved bits set in nlmsg_flags");
  961. return -EINVAL;
  962. }
  963. return 0;
  964. }
/* Dispatch one message for @family: netns visibility, header and flag
 * validation, op lookup, permission checks, then the do or dump path.
 */
static int genl_family_rcv_msg(const struct genl_family *family,
			       struct sk_buff *skb,
			       struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct genlmsghdr *hdr = nlmsg_data(nlh);
	struct genl_split_ops op;
	int hdrlen;
	u8 flags;

	/* this family doesn't exist in this netns */
	if (!family->netnsok && !net_eq(net, &init_net))
		return -ENOENT;

	hdrlen = GENL_HDRLEN + family->hdrsize;
	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return -EINVAL;

	if (genl_header_check(family, nlh, hdr, extack))
		return -EINVAL;

	/* NLM_F_DUMP is two bits; only a full match selects the dump op */
	flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
		GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
	if (genl_get_cmd(hdr->cmd, flags, family, &op))
		return -EOPNOTSUPP;

	/* GENL_ADMIN_PERM requires CAP_NET_ADMIN via netlink_capable() */
	if ((op.flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* GENL_UNS_ADMIN_PERM requires CAP_NET_ADMIN in the netns' user ns */
	if ((op.flags & GENL_UNS_ADMIN_PERM) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (flags & GENL_CMD_CAP_DUMP)
		return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
						  &op, hdrlen, net);
	else
		return genl_family_rcv_msg_doit(family, skb, nlh, extack,
						&op, hdrlen, net);
}
  1000. static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
  1001. struct netlink_ext_ack *extack)
  1002. {
  1003. const struct genl_family *family;
  1004. int err;
  1005. family = genl_family_find_byid(nlh->nlmsg_type);
  1006. if (family == NULL)
  1007. return -ENOENT;
  1008. genl_op_lock(family);
  1009. err = genl_family_rcv_msg(family, skb, nlh, extack);
  1010. genl_op_unlock(family);
  1011. return err;
  1012. }
/* Input callback of the generic netlink socket; processes every message
 * in @skb under the read side of cb_lock so families cannot unregister
 * while a message is being handled.
 */
static void genl_rcv(struct sk_buff *skb)
{
	down_read(&cb_lock);
	netlink_rcv_skb(skb, &genl_rcv_msg);
	up_read(&cb_lock);
}
/**************************************************************************
 * Controller
 **************************************************************************/

/* Forward declaration; the nlctrl family itself is defined below. */
static struct genl_family genl_ctrl;
/* Build a controller message fully describing @family: name, id,
 * version, header size, maxattr, the list of ops and the multicast
 * groups. Returns 0 or -EMSGSIZE when @skb runs out of room.
 */
static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
			  u32 flags, struct sk_buff *skb, u8 cmd)
{
	struct genl_op_iter i;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -EMSGSIZE;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
		goto nla_put_failure;

	if (genl_op_iter_init(family, &i)) {
		struct nlattr *nla_ops;

		nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
		if (nla_ops == NULL)
			goto nla_put_failure;

		while (genl_op_iter_next(&i)) {
			struct nlattr *nest;
			u32 op_flags;

			op_flags = i.flags;
			/* Advertise that this op has an attribute policy */
			if (i.doit.policy || i.dumpit.policy)
				op_flags |= GENL_CMD_CAP_HASPOL;

			nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}

		nla_nest_end(skb, nla_ops);
	}

	if (family->n_mcgrps) {
		struct nlattr *nla_grps;
		int i;	/* NOTE(review): shadows the op iterator 'i' above */

		nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
		if (nla_grps == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_mcgrps; i++) {
			struct nlattr *nest;
			const struct genl_multicast_group *grp;

			grp = &family->mcgrps[i];

			/* nest attribute types are 1-based here */
			nest = nla_nest_start_noflag(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
					family->mcgrp_offset + i) ||
			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
					   grp->name))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
		nla_nest_end(skb, nla_grps);
	}

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
  1086. static int ctrl_fill_mcgrp_info(const struct genl_family *family,
  1087. const struct genl_multicast_group *grp,
  1088. int grp_id, u32 portid, u32 seq, u32 flags,
  1089. struct sk_buff *skb, u8 cmd)
  1090. {
  1091. void *hdr;
  1092. struct nlattr *nla_grps;
  1093. struct nlattr *nest;
  1094. hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
  1095. if (hdr == NULL)
  1096. return -1;
  1097. if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
  1098. nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
  1099. goto nla_put_failure;
  1100. nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
  1101. if (nla_grps == NULL)
  1102. goto nla_put_failure;
  1103. nest = nla_nest_start_noflag(skb, 1);
  1104. if (nest == NULL)
  1105. goto nla_put_failure;
  1106. if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
  1107. nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
  1108. grp->name))
  1109. goto nla_put_failure;
  1110. nla_nest_end(skb, nest);
  1111. nla_nest_end(skb, nla_grps);
  1112. genlmsg_end(skb, hdr);
  1113. return 0;
  1114. nla_put_failure:
  1115. genlmsg_cancel(skb, hdr);
  1116. return -EMSGSIZE;
  1117. }
/* CTRL_CMD_GETFAMILY dump: walk every registered family visible from
 * the requesting netns. cb->args[0] records how many families were
 * already emitted so a resumed dump skips them.
 */
static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
	int n = 0;
	struct genl_family *rt;
	struct net *net = sock_net(skb->sk);
	int fams_to_skip = cb->args[0];
	unsigned int id;
	int err = 0;

	idr_for_each_entry(&genl_fam_idr, rt, id) {
		/* families not netns-aware are only visible in init_net */
		if (!rt->netnsok && !net_eq(net, &init_net))
			continue;

		if (n++ < fams_to_skip)
			continue;

		err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq, NLM_F_MULTI,
				     skb, CTRL_CMD_NEWFAMILY);
		if (err) {
			/* skb full: undo the count so this family is
			 * retried on the next dump pass.
			 */
			n--;
			break;
		}
	}

	cb->args[0] = n;
	return err;
}
  1142. static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
  1143. u32 portid, int seq, u8 cmd)
  1144. {
  1145. struct sk_buff *skb;
  1146. int err;
  1147. skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1148. if (skb == NULL)
  1149. return ERR_PTR(-ENOBUFS);
  1150. err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
  1151. if (err < 0) {
  1152. nlmsg_free(skb);
  1153. return ERR_PTR(err);
  1154. }
  1155. return skb;
  1156. }
  1157. static struct sk_buff *
  1158. ctrl_build_mcgrp_msg(const struct genl_family *family,
  1159. const struct genl_multicast_group *grp,
  1160. int grp_id, u32 portid, int seq, u8 cmd)
  1161. {
  1162. struct sk_buff *skb;
  1163. int err;
  1164. skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1165. if (skb == NULL)
  1166. return ERR_PTR(-ENOBUFS);
  1167. err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
  1168. seq, 0, skb, cmd);
  1169. if (err < 0) {
  1170. nlmsg_free(skb);
  1171. return ERR_PTR(err);
  1172. }
  1173. return skb;
  1174. }
/* Attribute policy for CTRL_CMD_GETFAMILY requests */
static const struct nla_policy ctrl_policy_family[] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
};
/* CTRL_CMD_GETFAMILY doit: look up a family by id or by name and reply
 * with its full description. A by-name miss may trigger module autoload.
 */
static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	const struct genl_family *res = NULL;
	int err = -EINVAL;

	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);

		res = genl_family_find_byid(id);
		err = -ENOENT;
	}

	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
		char *name;

		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
		res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
		if (res == NULL) {
			/* Drop both locks before the potentially sleeping
			 * module load, then retake them (same order as
			 * genl_rcv/genl_rcv_msg) and retry the lookup.
			 */
			genl_unlock();
			up_read(&cb_lock);
			request_module("net-pf-%d-proto-%d-family-%s",
				       PF_NETLINK, NETLINK_GENERIC, name);
			down_read(&cb_lock);
			genl_lock();
			res = genl_family_find_byname(name);
		}
#endif
		err = -ENOENT;
	}

	if (res == NULL)
		return err;

	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
		/* family doesn't exist here */
		return -ENOENT;
	}

	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
				    CTRL_CMD_NEWFAMILY);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	return genlmsg_reply(msg, info);
}
/* Multicast a controller notification about family or multicast-group
 * (un)registration. Silently succeeds while genl is still initialising.
 */
static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id)
{
	struct sk_buff *msg;

	/* genl is still initialising */
	if (!init_net.genl_sock)
		return 0;

	switch (event) {
	case CTRL_CMD_NEWFAMILY:
	case CTRL_CMD_DELFAMILY:
		WARN_ON(grp);	/* family events carry no group */
		msg = ctrl_build_family_msg(family, 0, 0, event);
		break;
	case CTRL_CMD_NEWMCAST_GRP:
	case CTRL_CMD_DELMCAST_GRP:
		BUG_ON(!grp);	/* group events require a group */
		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/* Non-netns-aware families only notify in the initial netns */
	if (!family->netnsok)
		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
					0, GFP_KERNEL);
	else
		genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);

	return 0;
}
/* Per-dump state for CTRL_CMD_GETPOLICY, stored inside cb->ctx. */
struct ctrl_dump_policy_ctx {
	struct netlink_policy_dump_state *state;	/* policy dump walker */
	const struct genl_family *rt;			/* family being dumped */
	struct genl_op_iter *op_iter;	/* op walker, all-ops mode only */
	u32 op;				/* requested op, single-op mode */
	u16 fam_id;			/* family id echoed in replies */
	u8 dump_map:1,			/* per-op policy map still pending */
	   single_op:1;			/* CTRL_ATTR_OP was supplied */
};
/* Attribute policy for CTRL_CMD_GETPOLICY requests */
static const struct nla_policy ctrl_policy_policy[] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
	[CTRL_ATTR_OP]		= { .type = NLA_U32 },
};
/* ->start for CTRL_CMD_GETPOLICY: resolve the target family, then load
 * the policies of either a single op (CTRL_ATTR_OP given) or all of the
 * family's ops into the policy dump state kept in cb->ctx.
 */
static int ctrl_dumppolicy_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->info.attrs;
	const struct genl_family *rt;
	struct genl_op_iter i;
	int err;

	/* ctx must fit in the netlink callback scratch area */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
		return -EINVAL;

	if (tb[CTRL_ATTR_FAMILY_ID]) {
		ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
	} else {
		rt = genl_family_find_byname(
			nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
		if (!rt)
			return -ENOENT;
		ctx->fam_id = rt->id;
	}

	rt = genl_family_find_byid(ctx->fam_id);
	if (!rt)
		return -ENOENT;

	ctx->rt = rt;

	if (tb[CTRL_ATTR_OP]) {
		struct genl_split_ops doit, dump;

		ctx->single_op = true;
		ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);

		err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
		if (err) {
			NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
			return err;
		}

		if (doit.policy) {
			err = netlink_policy_dump_add_policy(&ctx->state,
							     doit.policy,
							     doit.maxattr);
			if (err)
				goto err_free_state;
		}
		if (dump.policy) {
			err = netlink_policy_dump_add_policy(&ctx->state,
							     dump.policy,
							     dump.maxattr);
			if (err)
				goto err_free_state;
		}

		/* neither direction has a policy: nothing to dump */
		if (!ctx->state)
			return -ENODATA;

		ctx->dump_map = 1;
		return 0;
	}

	/* All-ops mode: ctx->op_iter walks the ops again later from the
	 * dump callback to emit the per-op policy index map.
	 */
	ctx->op_iter = kmalloc(sizeof(*ctx->op_iter), GFP_KERNEL);
	if (!ctx->op_iter)
		return -ENOMEM;

	genl_op_iter_init(rt, ctx->op_iter);
	ctx->dump_map = genl_op_iter_next(ctx->op_iter);

	for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
		if (i.doit.policy) {
			err = netlink_policy_dump_add_policy(&ctx->state,
							     i.doit.policy,
							     i.doit.maxattr);
			if (err)
				goto err_free_state;
		}
		if (i.dumpit.policy) {
			err = netlink_policy_dump_add_policy(&ctx->state,
							     i.dumpit.policy,
							     i.dumpit.maxattr);
			if (err)
				goto err_free_state;
		}
	}

	if (!ctx->state) {
		err = -ENODATA;
		goto err_free_op_iter;
	}
	return 0;

err_free_state:
	netlink_policy_dump_free(ctx->state);
err_free_op_iter:
	/* op_iter is NULL in the single-op path; kfree(NULL) is a no-op */
	kfree(ctx->op_iter);
	return err;
}
  1349. static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
  1350. struct netlink_callback *cb)
  1351. {
  1352. struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
  1353. void *hdr;
  1354. hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
  1355. cb->nlh->nlmsg_seq, &genl_ctrl,
  1356. NLM_F_MULTI, CTRL_CMD_GETPOLICY);
  1357. if (!hdr)
  1358. return NULL;
  1359. if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
  1360. return NULL;
  1361. return hdr;
  1362. }
/* Emit one CTRL_ATTR_OP_POLICY entry mapping an op to the indices of
 * its do/dump policies within the policy dump state.
 */
static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct genl_split_ops *doit,
				  struct genl_split_ops *dumpit)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	struct nlattr *nest_pol, *nest_op;
	void *hdr;
	int idx;

	/* skip if we have nothing to show */
	if (!doit->policy && !dumpit->policy)
		return 0;

	hdr = ctrl_dumppolicy_prep(skb, cb);
	if (!hdr)
		return -ENOBUFS;

	nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
	if (!nest_pol)
		goto err;

	/* nest keyed by doit->cmd; NOTE(review): presumably both split
	 * ops carry the same cmd here — confirm with genl_get_cmd_both().
	 */
	nest_op = nla_nest_start(skb, doit->cmd);
	if (!nest_op)
		goto err;

	if (doit->policy) {
		idx = netlink_policy_dump_get_policy_idx(ctx->state,
							 doit->policy,
							 doit->maxattr);

		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
			goto err;
	}
	if (dumpit->policy) {
		idx = netlink_policy_dump_get_policy_idx(ctx->state,
							 dumpit->policy,
							 dumpit->maxattr);

		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
			goto err;
	}

	nla_nest_end(skb, nest_op);
	nla_nest_end(skb, nest_pol);
	genlmsg_end(skb, hdr);

	return 0;

err:
	genlmsg_cancel(skb, hdr);
	return -ENOBUFS;
}
/* CTRL_CMD_GETPOLICY dump: first emit the per-op policy index map
 * (single op or all ops), then stream out the collected policies.
 * Returning skb->len tells the netlink core to call us again.
 */
static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	void *hdr;

	if (ctx->dump_map) {
		if (ctx->single_op) {
			struct genl_split_ops doit, dumpit;

			/* lookup succeeded in ->start, must not fail now */
			if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
						      &doit, &dumpit)))
				return -ENOENT;

			if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
				return skb->len;

			/* done with the per-op policy index list */
			ctx->dump_map = 0;
		}

		while (ctx->dump_map) {
			if (ctrl_dumppolicy_put_op(skb, cb,
						   &ctx->op_iter->doit,
						   &ctx->op_iter->dumpit))
				return skb->len;

			ctx->dump_map = genl_op_iter_next(ctx->op_iter);
		}
	}

	while (netlink_policy_dump_loop(ctx->state)) {
		struct nlattr *nest;

		hdr = ctrl_dumppolicy_prep(skb, cb);
		if (!hdr)
			goto nla_put_failure;

		nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
		if (!nest)
			goto nla_put_failure;

		if (netlink_policy_dump_write(skb, ctx->state))
			goto nla_put_failure;

		nla_nest_end(skb, nest);

		genlmsg_end(skb, hdr);
	}

	return skb->len;

nla_put_failure:
	/* skb full: drop the partial message, resume on the next call */
	genlmsg_cancel(skb, hdr);
	return skb->len;
}
/* ->done for CTRL_CMD_GETPOLICY: release the op iterator and the
 * policy dump state allocated in ctrl_dumppolicy_start().
 */
static int ctrl_dumppolicy_done(struct netlink_callback *cb)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;

	kfree(ctx->op_iter);
	netlink_policy_dump_free(ctx->state);
	return 0;
}
/* Operations of the nlctrl (controller) family itself */
static const struct genl_split_ops genl_ctrl_ops[] = {
	{
		/* GETFAMILY do path: reply with one family description */
		.cmd		= CTRL_CMD_GETFAMILY,
		.validate	= GENL_DONT_VALIDATE_STRICT,
		.policy		= ctrl_policy_family,
		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
		.doit		= ctrl_getfamily,
		.flags		= GENL_CMD_CAP_DO,
	},
	{
		/* GETFAMILY dump path: list all visible families */
		.cmd		= CTRL_CMD_GETFAMILY,
		.validate	= GENL_DONT_VALIDATE_DUMP,
		.policy		= ctrl_policy_family,
		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
		.dumpit		= ctrl_dumpfamily,
		.flags		= GENL_CMD_CAP_DUMP,
	},
	{
		/* GETPOLICY: dump a family's attribute policies */
		.cmd		= CTRL_CMD_GETPOLICY,
		.policy		= ctrl_policy_policy,
		.maxattr	= ARRAY_SIZE(ctrl_policy_policy) - 1,
		.start		= ctrl_dumppolicy_start,
		.dumpit		= ctrl_dumppolicy,
		.done		= ctrl_dumppolicy_done,
		.flags		= GENL_CMD_CAP_DUMP,
	},
};
/* The controller's single multicast group, used for (un)registration events */
static const struct genl_multicast_group genl_ctrl_groups[] = {
	{ .name = "notify", },
};
/* The controller family "nlctrl", registered at the fixed id GENL_ID_CTRL */
static struct genl_family genl_ctrl __ro_after_init = {
	.module		= THIS_MODULE,
	.split_ops	= genl_ctrl_ops,
	.n_split_ops	= ARRAY_SIZE(genl_ctrl_ops),
	.resv_start_op	= CTRL_CMD_GETPOLICY + 1,
	.mcgrps		= genl_ctrl_groups,
	.n_mcgrps	= ARRAY_SIZE(genl_ctrl_groups),
	.id		= GENL_ID_CTRL,
	.name		= "nlctrl",
	.version	= 0x2,
	.netnsok	= true,	/* visible in every netns */
};
/* Multicast bind hook: locate the family owning @group and enforce its
 * per-group capability requirements before letting the socket join.
 */
static int genl_bind(struct net *net, int group)
{
	const struct genl_family *family;
	unsigned int id;
	int ret = 0;

	down_read(&cb_lock);
	idr_for_each_entry(&genl_fam_idr, family, id) {
		const struct genl_multicast_group *grp;
		int i;

		if (family->n_mcgrps == 0)
			continue;

		/* translate the global group id to a family-local index */
		i = group - family->mcgrp_offset;
		if (i < 0 || i >= family->n_mcgrps)
			continue;

		grp = &family->mcgrps[i];
		if ((grp->flags & GENL_MCAST_CAP_NET_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		if ((grp->flags & GENL_MCAST_CAP_SYS_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_SYS_ADMIN))
			ret = -EPERM;

		if (ret)
			break;

		/* only notify the family after the permission checks pass */
		if (family->bind)
			family->bind(i);

		break;
	}
	up_read(&cb_lock);
	return ret;
}
  1526. static void genl_unbind(struct net *net, int group)
  1527. {
  1528. const struct genl_family *family;
  1529. unsigned int id;
  1530. down_read(&cb_lock);
  1531. idr_for_each_entry(&genl_fam_idr, family, id) {
  1532. int i;
  1533. if (family->n_mcgrps == 0)
  1534. continue;
  1535. i = group - family->mcgrp_offset;
  1536. if (i < 0 || i >= family->n_mcgrps)
  1537. continue;
  1538. if (family->unbind)
  1539. family->unbind(i);
  1540. break;
  1541. }
  1542. up_read(&cb_lock);
  1543. }
  1544. static int __net_init genl_pernet_init(struct net *net)
  1545. {
  1546. struct netlink_kernel_cfg cfg = {
  1547. .input = genl_rcv,
  1548. .flags = NL_CFG_F_NONROOT_RECV,
  1549. .bind = genl_bind,
  1550. .unbind = genl_unbind,
  1551. .release = genl_release,
  1552. };
  1553. /* we'll bump the group number right afterwards */
  1554. net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
  1555. if (!net->genl_sock && net_eq(net, &init_net))
  1556. panic("GENL: Cannot initialize generic netlink\n");
  1557. if (!net->genl_sock)
  1558. return -ENOMEM;
  1559. return 0;
  1560. }
/* Per-netns teardown: release the NETLINK_GENERIC kernel socket. */
static void __net_exit genl_pernet_exit(struct net *net)
{
	netlink_kernel_release(net->genl_sock);
	net->genl_sock = NULL;
}
/* Per-network-namespace init/exit hooks for generic netlink */
static struct pernet_operations genl_pernet_ops = {
	.init = genl_pernet_init,
	.exit = genl_pernet_exit,
};
/* Boot-time initialisation: register the controller family and the
 * per-netns hooks. Generic netlink is essential infrastructure, so any
 * failure here panics.
 */
static int __init genl_init(void)
{
	int err;

	err = genl_register_family(&genl_ctrl);
	if (err < 0)
		goto problem;

	err = register_pernet_subsys(&genl_pernet_ops);
	if (err)
		goto problem;

	return 0;

problem:
	panic("GENL: Cannot register controller: %d\n", err);
}

core_initcall(genl_init);
/* Deliver @skb to multicast @group in every net namespace. All but the
 * last namespace get a clone; the original skb is consumed by the final
 * nlmsg_multicast() (or freed on the error path). -ESRCH ("no
 * listeners") from an individual namespace is not treated as an error.
 */
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
{
	struct sk_buff *tmp;
	struct net *net, *prev = NULL;
	bool delivered = false;
	int err;

	rcu_read_lock();
	for_each_net_rcu(net) {
		if (prev) {
			tmp = skb_clone(skb, GFP_ATOMIC);
			if (!tmp) {
				err = -ENOMEM;
				goto error;
			}
			err = nlmsg_multicast(prev->genl_sock, tmp,
					      portid, group, GFP_ATOMIC);
			if (!err)
				delivered = true;
			else if (err != -ESRCH)
				goto error;
		}

		prev = net;
	}
	/* send the original skb to the last namespace iterated.
	 * NOTE(review): assumes the loop ran at least once (init_net
	 * always exists), otherwise prev would be NULL here — confirm.
	 */
	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);

	rcu_read_unlock();

	if (!err)
		delivered = true;
	else if (err != -ESRCH)
		return err;
	return delivered ? 0 : -ESRCH;
error:
	rcu_read_unlock();

	kfree_skb(skb);
	return err;
}
  1619. int genlmsg_multicast_allns(const struct genl_family *family,
  1620. struct sk_buff *skb, u32 portid,
  1621. unsigned int group)
  1622. {
  1623. if (WARN_ON_ONCE(group >= family->n_mcgrps))
  1624. return -EINVAL;
  1625. group = family->mcgrp_offset + group;
  1626. return genlmsg_mcast(skb, portid, group);
  1627. }
  1628. EXPORT_SYMBOL(genlmsg_multicast_allns);
  1629. void genl_notify(const struct genl_family *family, struct sk_buff *skb,
  1630. struct genl_info *info, u32 group, gfp_t flags)
  1631. {
  1632. struct net *net = genl_info_net(info);
  1633. struct sock *sk = net->genl_sock;
  1634. if (WARN_ON_ONCE(group >= family->n_mcgrps))
  1635. return;
  1636. group = family->mcgrp_offset + group;
  1637. nlmsg_notify(sk, skb, info->snd_portid, group,
  1638. nlmsg_report(info->nlhdr), flags);
  1639. }
  1640. EXPORT_SYMBOL(genl_notify);