cgroup.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_lsm.h>
#include <linux/bpf_verifier.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
/*
 * cgroup bpf destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions. Use a separate workqueue so that cgroup bpf
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_bpf_destroy_wq;

static int __init cgroup_bpf_wq_init(void)
{
	cgroup_bpf_destroy_wq = alloc_workqueue("cgroup_bpf_destroy", 0, 1);
	if (!cgroup_bpf_destroy_wq)
		panic("Failed to alloc workqueue for cgroup bpf destroy.\n");
	return 0;
}
core_initcall(cgroup_bpf_wq_init);
/* __always_inline is necessary to prevent indirect call through run_prog
 * function pointer.
 */
static __always_inline int
bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
		      enum cgroup_bpf_attach_type atype,
		      const void *ctx, bpf_prog_run_fn run_prog,
		      int retval, u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 func_ret;

	run_ctx.retval = retval;
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(cgrp->effective[atype]);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
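	/* When @ret_flags is non-NULL, each program's 32-bit return value is
	 * split below: bit 0 is the allow(1)/deny(0) verdict and the upper
	 * bits (e.g. BPF_RET_SET_CN for egress) are OR'ed into *ret_flags
	 * for the caller to interpret.
	 */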
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		if (ret_flags) {
			*(ret_flags) |= (func_ret >> 1);
			func_ret &= 1;
		}
		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return run_ctx.retval;
}
unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct sock *sk;
	struct cgroup *cgrp;
	int ret = 0;
	u64 *args;

	args = (u64 *)ctx;
	sk = (void *)(unsigned long)args[0];
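	/* Recover the enclosing shim program from its first instruction;
	 * equivalent to the commented-out container_of() below.
	 */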
	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct socket *sock;
	struct cgroup *cgrp;
	int ret = 0;
	u64 *args;

	args = (u64 *)ctx;
	sock = (void *)(unsigned long)args[0];
	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct cgroup *cgrp;
	int ret = 0;

	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	/* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. */
	cgrp = task_dfl_cgroup(current);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}
#ifdef CONFIG_BPF_LSM
struct cgroup_lsm_atype {
	u32 attach_btf_id;
	int refcnt;
};

static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];

static enum cgroup_bpf_attach_type
bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
{
	int i;

	lockdep_assert_held(&cgroup_mutex);

	if (attach_type != BPF_LSM_CGROUP)
		return to_cgroup_bpf_attach_type(attach_type);
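	/* Prefer a slot that already holds this attach_btf_id, then fall back
	 * to the first free slot; -E2BIG once all CGROUP_LSM_NUM slots are
	 * taken.
	 */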
	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
		if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
			return CGROUP_LSM_START + i;

	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
		if (cgroup_lsm_atype[i].attach_btf_id == 0)
			return CGROUP_LSM_START + i;

	return -E2BIG;
}

void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	lockdep_assert_held(&cgroup_mutex);

	WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
		     cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);

	cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
	cgroup_lsm_atype[i].refcnt++;
}

void bpf_cgroup_atype_put(int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	cgroup_lock();
	if (--cgroup_lsm_atype[i].refcnt <= 0)
		cgroup_lsm_atype[i].attach_btf_id = 0;
	WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
	cgroup_unlock();
}
#else
static enum cgroup_bpf_attach_type
bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
{
	if (attach_type != BPF_LSM_CGROUP)
		return to_cgroup_bpf_attach_type(attach_type);
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_LSM */
void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}
/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;
	unsigned int atype;

	cgroup_lock();

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct hlist_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl;
		struct hlist_node *pltmp;

		hlist_for_each_entry_safe(pl, pltmp, progs, node) {
			hlist_del(&pl->node);
			if (pl->prog) {
				if (pl->prog->expected_attach_type == BPF_LSM_CGROUP)
					bpf_trampoline_unlink_cgroup_shim(pl->prog);
				bpf_prog_put(pl->prog);
			}
			if (pl->link) {
				if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP)
					bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog);
				bpf_cgroup_link_auto_detach(pl->link);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	cgroup_unlock();
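	/* Drop the references on all ancestors taken by cgroup_bpf_inherit(). */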
	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(cgroup_bpf_destroy_wq, &cgrp->bpf.release_work);
}
/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct hlist_head *head, int *preorder_cnt)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	hlist_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		if (preorder_cnt && (pl->flags & BPF_F_PREORDER))
			(*preorder_cnt)++;
		cnt++;
	}
	return cnt;
}
/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;

	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype], NULL);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);

	return true;
}
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int i, j, cnt = 0, preorder_cnt = 0, fstart, bstart, init_bstart;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype], &preorder_cnt);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	fstart = preorder_cnt;
	bstart = preorder_cnt - 1;
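	/* Layout: slots [0, preorder_cnt) hold BPF_F_PREORDER programs, the
	 * rest start at fstart. Walking from @cgrp towards the root, normal
	 * programs are appended in order (descendants run before ancestors),
	 * while preorder programs are packed backwards from bstart and
	 * re-reversed per level below, so they run ancestors first.
	 */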
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		init_bstart = bstart;
		hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			if (pl->flags & BPF_F_PREORDER) {
				item = &progs->items[bstart];
				bstart--;
			} else {
				item = &progs->items[fstart];
				fstart++;
			}
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}

		/* reverse pre-ordering progs at this cgroup level */
		for (i = bstart + 1, j = init_bstart; i < j; i++, j--)
			swap(progs->items[i], progs->items[j]);
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}
static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}
/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
	/* has to use macro instead of const int, since compiler thinks
	 * that array below is variable length
	 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_HLIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}
static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}
#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (hlist_empty(progs))
			return NULL;
		return hlist_entry(progs->first, typeof(*pl), node);
	}

	hlist_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		hlist_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}
/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_attach(struct cgroup *cgrp,
			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
			       struct bpf_cgroup_link *link,
			       enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog *new_prog = prog ? : link->link.prog;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs, NULL) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		struct hlist_node *last = NULL;

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
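		/* hlist has no add-tail helper: walk to the last node and
		 * insert the new entry behind it.
		 */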
		if (hlist_empty(progs))
			hlist_add_head(&pl->node, progs);
		else
			hlist_for_each(last, progs) {
				if (last->next)
					continue;
				hlist_add_behind(&pl->node, last);
				break;
			}
	}

	pl->prog = prog;
	pl->link = link;
	pl->flags = flags;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

	if (type == BPF_LSM_CGROUP) {
		err = bpf_trampoline_link_cgroup_shim(new_prog, atype);
		if (err)
			goto cleanup;
	}

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup_trampoline;

	if (old_prog) {
		if (type == BPF_LSM_CGROUP)
			bpf_trampoline_unlink_cgroup_shim(old_prog);
		bpf_prog_put(old_prog);
	} else {
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	}
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup_trampoline:
	if (type == BPF_LSM_CGROUP)
		bpf_trampoline_unlink_cgroup_shim(new_prog);

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		hlist_del(&pl->node);
		kfree(pl);
	}
	return err;
}
static int cgroup_bpf_attach(struct cgroup *cgrp,
			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
			     struct bpf_cgroup_link *link,
			     enum bpf_attach_type type,
			     u32 flags)
{
	int ret;

	cgroup_lock();
	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
	cgroup_unlock();
	return ret;
}
/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct hlist_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			hlist_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}
/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: &struct bpf_prog for the target BPF program with its refcnt
 *            incremented
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	bool found = false;

	atype = bpf_cgroup_atype_find(link->type, new_prog->aux->attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	hlist_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}
static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	cgroup_lock();
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	cgroup_unlock();
	return ret;
}
static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (hlist_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return hlist_entry(progs->first, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	hlist_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}
/**
 * purge_effective_progs() - After compute_effective_progs fails to alloc new
 *                           cgrp->bpf.inactive table we can recover by
 *                           recomputing the array in place.
 *
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @atype: Type of detach operation
 */
static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
				  struct bpf_cgroup_link *link,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct hlist_head *head;
	struct cgroup *cg;
	int pos;

	/* recompute effective prog array in place */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link or prog in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			hlist_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->prog == prog && pl->link == link)
					goto found;
				pos++;
			}
		}

		/* no link or prog match, skip the cgroup of this layer */
		continue;
found:
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));

		/* Remove the program from the array */
		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
			  "Failed to purge a prog from array at index %d", pos);
	}
}
/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			       struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	u32 attach_btf_id = 0;
	u32 flags;

	if (prog)
		attach_btf_id = prog->aux->attach_btf_id;
	if (link)
		attach_btf_id = link->link.prog->aux->attach_btf_id;

	atype = bpf_cgroup_atype_find(type, attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	if (update_effective_progs(cgrp, atype)) {
		/* if update effective array failed replace the prog with a dummy prog */
		pl->prog = old_prog;
		pl->link = link;
		purge_effective_progs(cgrp, old_prog, link, atype);
	}

	/* now can actually delete it from this cgroup list */
	hlist_del(&pl->node);
	kfree(pl);

	if (hlist_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog) {
		if (type == BPF_LSM_CGROUP)
			bpf_trampoline_unlink_cgroup_shim(old_prog);
		bpf_prog_put(old_prog);
	}
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;
}
static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			     enum bpf_attach_type type)
{
	int ret;

	cgroup_lock();
	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
	cgroup_unlock();
	return ret;
}
/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE;
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type from_atype, to_atype;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	int cnt, ret = 0, i;
	int total_cnt = 0;
	u32 flags;

	if (effective_query && prog_attach_flags)
		return -EINVAL;
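	/* BPF_LSM_CGROUP fans out over every CGROUP_LSM_* slot; any other
	 * attach type maps to exactly one cgroup attach type.
	 */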
	if (type == BPF_LSM_CGROUP) {
		if (!effective_query && attr->query.prog_cnt &&
		    prog_ids && !prog_attach_flags)
			return -EINVAL;

		from_atype = CGROUP_LSM_START;
		to_atype = CGROUP_LSM_END;
		flags = 0;
	} else {
		from_atype = to_cgroup_bpf_attach_type(type);
		if (from_atype < 0)
			return -EINVAL;
		to_atype = from_atype;
		flags = cgrp->bpf.flags[from_atype];
	}

	for (atype = from_atype; atype <= to_atype; atype++) {
		if (effective_query) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			total_cnt += bpf_prog_array_length(effective);
		} else {
			total_cnt += prog_list_length(&cgrp->bpf.progs[atype], NULL);
		}
	}

	/* always output uattr->query.attach_flags as 0 during effective query */
	flags = effective_query ? 0 : flags;
	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
		/* return early if user requested only program count + flags */
		return 0;

	if (attr->query.prog_cnt < total_cnt) {
		total_cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
		if (effective_query) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
			ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
		} else {
			struct hlist_head *progs;
			struct bpf_prog_list *pl;
			struct bpf_prog *prog;
			u32 id;

			progs = &cgrp->bpf.progs[atype];
			cnt = min_t(int, prog_list_length(progs, NULL), total_cnt);
			i = 0;
			hlist_for_each_entry(pl, progs, node) {
				prog = prog_list_prog(pl);
				id = prog->aux->id;
				if (copy_to_user(prog_ids + i, &id, sizeof(id)))
					return -EFAULT;
				if (++i == cnt)
					break;
			}

			if (prog_attach_flags) {
				flags = cgrp->bpf.flags[atype];

				for (i = 0; i < cnt; i++)
					if (copy_to_user(prog_attach_flags + i,
							 &flags, sizeof(flags)))
						return -EFAULT;
				prog_attach_flags += cnt;
			}
		}

		prog_ids += cnt;
		total_cnt -= cnt;
	}
	return ret;
}
static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	int ret;

	cgroup_lock();
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	cgroup_unlock();
	return ret;
}
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}
int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);
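	/* An invalid attach_bpf_fd is tolerated: legacy detach of non-MULTI
	 * attach points works with prog == NULL (see find_detach_entry()).
	 */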
	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}
static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	cgroup_lock();

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		cgroup_unlock();
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));
	if (cg_link->type == BPF_LSM_CGROUP)
		bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog);

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	cgroup_unlock();

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	cgroup_lock();
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	cgroup_unlock();

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	cgroup_lock();
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	cgroup_unlock();

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}
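	/* bpf_link-based attachments always use BPF_F_ALLOW_MULTI. */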
	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @atype: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @atype must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS (0)	- continue with packet output
 *   NET_XMIT_DROP    (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN      (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -err			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = -skb_network_offset(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
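	/* Temporarily push skb->data back to the network header so the
	 * program sees the packet from the IP header on; pulled back below.
	 */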
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		u32 flags = 0;
		bool cn;

		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
					    __bpf_prog_run_save_cb, 0, &flags);

		/* Return values of CGROUP EGRESS BPF programs are:
		 *   0: drop packet
		 *   1: keep packet
		 *   2: drop packet and cn
		 *   3: keep packet and cn
		 *
		 * The returned value is then converted to one of the NET_XMIT
		 * or an error code that is then interpreted as drop packet
		 * (and no cn):
		 *   0: NET_XMIT_SUCCESS  skb should be transmitted
		 *   1: NET_XMIT_DROP     skb should be dropped and cn
		 *   2: NET_XMIT_CN       skb should be transmitted and cn
		 *   3: -err              skb should be dropped
		 */

		cn = flags & BPF_RET_SET_CN;
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
		if (!ret)
			ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
		else
			ret = (cn ? NET_XMIT_DROP : ret);
	} else {
		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
					    skb, __bpf_prog_run_save_cb, 0,
					    NULL);
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
	}

	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
				     NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by the user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @uaddrlen: Pointer to the size of the sockaddr struct provided by user. It is
 *            read-only for AF_INET[6] uaddr but can be modified for AF_UNIX
 *            uaddr.
 * @atype: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET, INET6 or UNIX.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      int *uaddrlen,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6 &&
	    sk->sk_family != AF_UNIX)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
		ctx.uaddrlen = 0;
	} else {
		ctx.uaddrlen = *uaddrlen;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
				    0, flags);

	if (!ret && uaddr)
		*uaddrlen = ctx.uaddrlen;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
				     0, NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
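
/*
 * Usage sketch (illustrative, not part of this file): a minimal "sockops"
 * program, assuming libbpf conventions, that observes TCP state
 * transitions delivered through skops->op:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("sockops")
 *	int log_estab(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_printk("active connection established");
 *		return 1;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */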

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int ret;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
				    NULL);
	rcu_read_unlock();

	return ret;
}
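
/*
 * Usage sketch (illustrative, not part of this file): a device cgroup
 * program matching the ctx layout built above, where access_type packs the
 * access bits in the upper 16 bits and the device type in the lower 16.
 * Assuming libbpf's "cgroup/dev" section convention:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("cgroup/dev")
 *	int deny_mknod(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		if ((ctx->access_type >> 16) & BPF_DEVCG_ACC_MKNOD)
 *			return 0;	// mknod denied: caller sees -EPERM
 *		return 1;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */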

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now, but provides an ability
	 * to extend the API. The verifier checks that its value is
	 * correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	struct bpf_cg_run_ctx *ctx;
	void *ptr;

	/* get current cgroup storage from BPF run context */
	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
	storage = ctx->prog_item->cgroup_storage[stype];

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
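
/*
 * Usage sketch (illustrative, not part of this file): from BPF, the helper
 * above is reached through bpf_get_local_storage(), which returns a pointer
 * into per-cgroup storage and cannot fail. A minimal per-cgroup packet
 * counter, assuming libbpf map definitions and the usual
 * <linux/bpf.h> / <bpf/bpf_helpers.h> includes:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
 *		__type(key, struct bpf_cgroup_storage_key);
 *		__type(value, __u64);
 *	} pkt_cnt SEC(".maps");
 *
 *	SEC("cgroup_skb/egress")
 *	int count_pkts(struct __sk_buff *skb)
 *	{
 *		__u64 *cnt = bpf_get_local_storage(&pkt_cnt, 0);
 *
 *		__sync_fetch_and_add(cnt, 1);
 *		return 1;	// allow the packet
 *	}
 */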

BPF_CALL_0(bpf_get_retval)
{
	struct bpf_cg_run_ctx *ctx =
		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

	return ctx->retval;
}

const struct bpf_func_proto bpf_get_retval_proto = {
	.func		= bpf_get_retval,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_set_retval, int, retval)
{
	struct bpf_cg_run_ctx *ctx =
		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

	ctx->retval = retval;
	return 0;
}

const struct bpf_func_proto bpf_set_retval_proto = {
	.func		= bpf_set_retval,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
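
/*
 * Usage sketch (illustrative, not part of this file): bpf_set_retval() lets
 * a rejecting program pick the errno the syscall reports instead of the
 * default -EPERM. The option values below are the common ABI numbers and
 * are assumptions here:
 *
 *	#define SOL_SOCKET	1
 *	#define SO_PRIORITY	12
 *
 *	SEC("cgroup/getsockopt")
 *	int hide_priority(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_SOCKET && ctx->optname == SO_PRIORITY) {
 *			ctx->optlen = 0;
 *			bpf_set_retval(-13);	// -EACCES instead of -EPERM
 *			return 0;		// fail with the retval above
 *		}
 *		return 1;
 *	}
 */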

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *func_proto;

	func_proto = cgroup_common_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	func_proto = cgroup_current_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of the new value if the program set one, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @atype: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * its return value was != 1 during execution. In all other cases 0 is
 * returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
				    NULL);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret;
}
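
/*
 * Usage sketch (illustrative, not part of this file): a "cgroup/sysctl"
 * program, assuming libbpf conventions and a kernel recent enough to have
 * bpf_strncmp(), that rejects writes anywhere under net/ipv4/:
 *
 *	SEC("cgroup/sysctl")
 *	int protect_ipv4(struct bpf_sysctl *ctx)
 *	{
 *		char name[64] = {};
 *
 *		bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
 *		if (ctx->write && !bpf_strncmp(name, 9, "net/ipv4/"))
 *			return 0;	// write denied: -EPERM
 *		return 1;
 *	}
 */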

#ifdef CONFIG_NET
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	if (max_optlen <= sizeof(buf->data)) {
		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
		 * bytes avoid the cost of kzalloc.
		 */
		ctx->optval = buf->data;
		ctx->optval_end = ctx->optval + max_optlen;
		return max_optlen;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
			     struct bpf_sockopt_buf *buf)
{
	if (ctx->optval == buf->data)
		return;
	kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
				  struct bpf_sockopt_buf *buf)
{
	return ctx->optval != buf->data;
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, sockptr_t optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);
	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_sockptr(ctx.optval, optval,
			      min(*optlen, max_optlen))) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
				    &ctx, bpf_prog_run, 0, NULL);
	release_sock(sk);

	if (ret)
		goto out;

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
			pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
				     ctx.optlen, max_optlen);
			ret = 0;
			goto out;
		}
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			/* We've used bpf_sockopt_kern->buf as an intermediary
			 * storage, but the BPF program indicates that we need
			 * to pass this data to the kernel setsockopt handler.
			 * No way to export on-stack buf, have to allocate a
			 * new buffer.
			 */
			if (!sockopt_buf_allocated(&ctx, &buf)) {
				void *p = kmalloc(ctx.optlen, GFP_USER);

				if (!p) {
					ret = -ENOMEM;
					goto out;
				}
				memcpy(p, ctx.optval, ctx.optlen);
				*kernel_optval = p;
			} else {
				*kernel_optval = ctx.optval;
			}
			/* export and don't free sockopt buf */
			return 0;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}
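
/*
 * Usage sketch (illustrative, not part of this file): the canonical use case
 * mentioned above, forcing TCP_CONGESTION to "cubic". Note the mandatory
 * bounds check against optval_end; the numeric option values are
 * assumptions here:
 *
 *	#define SOL_TCP		6
 *	#define TCP_CONGESTION	13
 *
 *	SEC("cgroup/setsockopt")
 *	int force_cubic(struct bpf_sockopt *ctx)
 *	{
 *		char cubic[] = "cubic";
 *		char *optval = ctx->optval;
 *
 *		if (ctx->level != SOL_TCP || ctx->optname != TCP_CONGESTION)
 *			return 1;	// pass through to the kernel handler
 *		if (optval + sizeof(cubic) > (char *)ctx->optval_end)
 *			return 0;	// out of bounds: -EPERM
 *		__builtin_memcpy(optval, cubic, sizeof(cubic));
 *		ctx->optlen = sizeof(cubic);
 *		return 1;
 *	}
 */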

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, sockptr_t optval,
				       sockptr_t optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.current_task = current,
	};
	int orig_optlen;
	int ret;

	orig_optlen = max_optlen;
	ctx.optlen = max_optlen;
	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */
		if (copy_from_sockptr(&ctx.optlen, optlen,
				      sizeof(ctx.optlen))) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen < 0) {
			ret = -EFAULT;
			goto out;
		}
		orig_optlen = ctx.optlen;

		if (copy_from_sockptr(ctx.optval, optval,
				      min(ctx.optlen, max_optlen))) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
				    &ctx, bpf_prog_run, retval, NULL);
	release_sock(sk);

	if (ret < 0)
		goto out;

	if (!sockptr_is_null(optval) &&
	    (ctx.optlen > max_optlen || ctx.optlen < 0)) {
		if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
			pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
				     ctx.optlen, max_optlen);
			ret = retval;
			goto out;
		}
		ret = -EFAULT;
		goto out;
	}

	if (ctx.optlen != 0) {
		if (!sockptr_is_null(optval) &&
		    copy_to_sockptr(optval, ctx.optval, ctx.optlen)) {
			ret = -EFAULT;
			goto out;
		}
		if (copy_to_sockptr(optlen, &ctx.optlen, sizeof(ctx.optlen))) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}
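
/*
 * Usage sketch (illustrative, not part of this file): a getsockopt program
 * can ignore the kernel handler's result and serve a value itself, which is
 * how BPF-defined socket options are typically implemented. SOL_CUSTOM is a
 * hypothetical private level:
 *
 *	#define SOL_CUSTOM	0xdeadbeef
 *
 *	SEC("cgroup/getsockopt")
 *	int serve_custom(struct bpf_sockopt *ctx)
 *	{
 *		char *optval = ctx->optval;
 *
 *		if (ctx->level != SOL_CUSTOM)
 *			return 1;	// keep the kernel's result
 *		if (optval + 1 > (char *)ctx->optval_end)
 *			return 0;	// bounds check failed
 *		ctx->retval = 0;	// pretend the kernel call succeeded
 *		optval[0] = 42;		// value produced by BPF
 *		ctx->optlen = 1;
 *		return 1;
 *	}
 */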

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.optlen = *optlen,
		.optval = optval,
		.optval_end = optval + *optlen,
		.current_task = current,
	};
	int ret;

	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
	 * user data back into BPF buffer when retval != 0. This is
	 * done as an optimization to avoid extra copy, assuming
	 * kernel won't populate the data in case of an error.
	 * Here we always pass the data and memset() should
	 * be called if that data shouldn't be "exported".
	 */

	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
				    &ctx, bpf_prog_run, retval, NULL);
	if (ret < 0)
		return ret;

	if (ctx.optlen > *optlen)
		return -EFAULT;

	/* BPF programs can shrink the buffer, export the modifications.
	 */
	if (ctx.optlen != 0)
		*optlen = ctx.optlen;

	return ret;
}
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
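
/*
 * Usage sketch (illustrative, not part of this file): from BPF, the flags
 * argument of bpf_sysctl_get_name() selects between the full slash-separated
 * path built by sysctl_cpy_dir() above and just the final component:
 *
 *	char full[64], base[32];
 *
 *	bpf_sysctl_get_name(ctx, full, sizeof(full), 0);
 *		// e.g. "net/ipv4/tcp_mem"
 *	bpf_sysctl_get_name(ctx, base, sizeof(base), BPF_F_SYSCTL_BASE_NAME);
 *		// e.g. "tcp_mem"
 */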

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *func_proto;

	func_proto = cgroup_common_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	func_proto = cgroup_current_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	switch (func_id) {
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores an additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_RAW_INSN(
				BPF_CLASS(si->code) | BPF_MEM | BPF_SIZEOF(u32),
				treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)),
				si->imm);
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

#ifdef CONFIG_NET
BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
{
	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;

	return net->net_cookie;
}

static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
	.func		= bpf_get_netns_cookie_sockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
};
#endif

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *func_proto;

	func_proto = cgroup_common_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	func_proto = cgroup_current_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sockopt_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_getsockopt_proto;
		return NULL;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			fallthrough;
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}

	return true;
}

#define CG_SOCKOPT_READ_FIELD(F)					\
	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),	\
		    si->dst_reg, si->src_reg,				\
		    offsetof(struct bpf_sockopt_kern, F))

#define CG_SOCKOPT_WRITE_FIELD(F)					\
	BPF_RAW_INSN((BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F) |	\
		      BPF_MEM | BPF_CLASS(si->code)),			\
		     si->dst_reg, si->src_reg,				\
		     offsetof(struct bpf_sockopt_kern, F),		\
		     si->imm)

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_READ_FIELD(sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_WRITE_FIELD(level);
		else
			*insn++ = CG_SOCKOPT_READ_FIELD(level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_WRITE_FIELD(optname);
		else
			*insn++ = CG_SOCKOPT_READ_FIELD(optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_WRITE_FIELD(optlen);
		else
			*insn++ = CG_SOCKOPT_READ_FIELD(optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);

		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
					      offsetof(struct bpf_sockopt_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
					      treg, si->dst_reg,
					      offsetof(struct bpf_sockopt_kern, current_task));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
					      treg, treg,
					      offsetof(struct task_struct, bpf_ctx));
			*insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_MEM |
					       BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
					       treg, si->src_reg,
					       offsetof(struct bpf_cg_run_ctx, retval),
					       si->imm);
			*insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
					      offsetof(struct bpf_sockopt_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
					      si->dst_reg, si->src_reg,
					      offsetof(struct bpf_sockopt_kern, current_task));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
					      si->dst_reg, si->dst_reg,
					      offsetof(struct task_struct, bpf_ctx));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
					      si->dst_reg, si->dst_reg,
					      offsetof(struct bpf_cg_run_ctx, retval));
		}
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_READ_FIELD(optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_READ_FIELD(optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ed.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};

/* Common helpers for cgroup hooks. */
const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_retval:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
		case BPF_CGROUP_SOCK_OPS:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
		case BPF_CGROUP_UNIX_RECVMSG:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_UNIX_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UNIX_GETSOCKNAME:
			return NULL;
		default:
			return &bpf_get_retval_proto;
		}
	case BPF_FUNC_set_retval:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
		case BPF_CGROUP_SOCK_OPS:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
		case BPF_CGROUP_UNIX_RECVMSG:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_UNIX_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UNIX_GETSOCKNAME:
			return NULL;
		default:
			return &bpf_set_retval_proto;
		}
	default:
		return NULL;
	}
}

/* Common helpers for cgroup hooks with valid process context. */
const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
#ifdef CONFIG_CGROUP_NET_CLASSID
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_curr_proto;
#endif
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	default:
		return NULL;
	}
}