sock_map.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
  3. #include <linux/bpf.h>
  4. #include <linux/btf_ids.h>
  5. #include <linux/filter.h>
  6. #include <linux/errno.h>
  7. #include <linux/file.h>
  8. #include <linux/net.h>
  9. #include <linux/workqueue.h>
  10. #include <linux/skmsg.h>
  11. #include <linux/list.h>
  12. #include <linux/jhash.h>
  13. #include <linux/sock_diag.h>
  14. #include <net/udp.h>
  15. struct bpf_stab {
  16. struct bpf_map map;
  17. struct sock **sks;
  18. struct sk_psock_progs progs;
  19. spinlock_t lock;
  20. };
  21. #define SOCK_CREATE_FLAG_MASK \
  22. (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  23. /* This mutex is used to
  24. * - protect against races between prog/link attach/detach and link prog update, and
  25. * - protect against races between releasing and accessing the map in bpf_link.
  26. * A single global mutex is used since contention is expected to be low.
  27. */
  28. static DEFINE_MUTEX(sockmap_mutex);
  29. static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
  30. struct bpf_prog *old, struct bpf_link *link,
  31. u32 which);
  32. static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);
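/* Allocate a BPF_MAP_TYPE_SOCKMAP: a bpf_stab whose entries are a flat
 * array of struct sock pointers, one slot per map index.
 */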
  33. static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
  34. {
  35. struct bpf_stab *stab;
  36. if (attr->max_entries == 0 ||
  37. attr->key_size != 4 ||
  38. (attr->value_size != sizeof(u32) &&
  39. attr->value_size != sizeof(u64)) ||
  40. attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  41. return ERR_PTR(-EINVAL);
  42. stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
  43. if (!stab)
  44. return ERR_PTR(-ENOMEM);
  45. bpf_map_init_from_attr(&stab->map, attr);
  46. spin_lock_init(&stab->lock);
  47. stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
  48. sizeof(struct sock *),
  49. stab->map.numa_node);
  50. if (!stab->sks) {
  51. bpf_map_area_free(stab);
  52. return ERR_PTR(-ENOMEM);
  53. }
  54. return &stab->map;
  55. }
  56. int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
  57. {
  58. struct bpf_map *map;
  59. int ret;
  60. if (attr->attach_flags || attr->replace_bpf_fd)
  61. return -EINVAL;
  62. CLASS(fd, f)(attr->target_fd);
  63. map = __bpf_map_get(f);
  64. if (IS_ERR(map))
  65. return PTR_ERR(map);
  66. mutex_lock(&sockmap_mutex);
  67. ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
  68. mutex_unlock(&sockmap_mutex);
  69. return ret;
  70. }
  71. int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  72. {
  73. struct bpf_prog *prog;
  74. struct bpf_map *map;
  75. int ret;
  76. if (attr->attach_flags || attr->replace_bpf_fd)
  77. return -EINVAL;
  78. CLASS(fd, f)(attr->target_fd);
  79. map = __bpf_map_get(f);
  80. if (IS_ERR(map))
  81. return PTR_ERR(map);
  82. prog = bpf_prog_get(attr->attach_bpf_fd);
  83. if (IS_ERR(prog))
  84. return PTR_ERR(prog);
  85. if (prog->type != ptype) {
  86. ret = -EINVAL;
  87. goto put_prog;
  88. }
  89. mutex_lock(&sockmap_mutex);
  90. ret = sock_map_prog_update(map, NULL, prog, NULL, attr->attach_type);
  91. mutex_unlock(&sockmap_mutex);
  92. put_prog:
  93. bpf_prog_put(prog);
  94. return ret;
  95. }
  96. static void sock_map_sk_acquire(struct sock *sk)
  97. __acquires(&sk->sk_lock.slock)
  98. {
  99. lock_sock(sk);
  100. rcu_read_lock();
  101. }
  102. static void sock_map_sk_release(struct sock *sk)
  103. __releases(&sk->sk_lock.slock)
  104. {
  105. rcu_read_unlock();
  106. release_sock(sk);
  107. }
  108. static void sock_map_add_link(struct sk_psock *psock,
  109. struct sk_psock_link *link,
  110. struct bpf_map *map, void *link_raw)
  111. {
  112. link->link_raw = link_raw;
  113. link->map = map;
  114. spin_lock_bh(&psock->link_lock);
  115. list_add_tail(&link->list, &psock->link);
  116. spin_unlock_bh(&psock->link_lock);
  117. }
  118. static void sock_map_del_link(struct sock *sk,
  119. struct sk_psock *psock, void *link_raw)
  120. {
  121. bool strp_stop = false, verdict_stop = false;
  122. struct sk_psock_link *link, *tmp;
  123. spin_lock_bh(&psock->link_lock);
  124. list_for_each_entry_safe(link, tmp, &psock->link, list) {
  125. if (link->link_raw == link_raw) {
  126. struct bpf_map *map = link->map;
  127. struct sk_psock_progs *progs = sock_map_progs(map);
  128. if (psock->saved_data_ready && progs->stream_parser)
  129. strp_stop = true;
  130. if (psock->saved_data_ready && progs->stream_verdict)
  131. verdict_stop = true;
  132. if (psock->saved_data_ready && progs->skb_verdict)
  133. verdict_stop = true;
  134. list_del(&link->list);
  135. sk_psock_free_link(link);
  136. break;
  137. }
  138. }
  139. spin_unlock_bh(&psock->link_lock);
  140. if (strp_stop || verdict_stop) {
  141. write_lock_bh(&sk->sk_callback_lock);
  142. if (strp_stop)
  143. sk_psock_stop_strp(sk, psock);
  144. if (verdict_stop)
  145. sk_psock_stop_verdict(sk, psock);
  146. if (psock->psock_update_sk_prot)
  147. psock->psock_update_sk_prot(sk, psock, false);
  148. write_unlock_bh(&sk->sk_callback_lock);
  149. }
  150. }
  151. static void sock_map_unref(struct sock *sk, void *link_raw)
  152. {
  153. struct sk_psock *psock = sk_psock(sk);
  154. if (likely(psock)) {
  155. sock_map_del_link(sk, psock, link_raw);
  156. sk_psock_put(sk, psock);
  157. }
  158. }
  159. static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
  160. {
  161. if (!sk->sk_prot->psock_update_sk_prot)
  162. return -EINVAL;
  163. psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
  164. return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
  165. }
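/* Take a reference on the socket's existing psock, if any. Returns NULL when
 * no psock is attached, or ERR_PTR(-EBUSY) when the existing psock cannot be
 * reused (it belongs to another user or is being destroyed).
 */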
  166. static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
  167. {
  168. struct sk_psock *psock;
  169. rcu_read_lock();
  170. psock = sk_psock(sk);
  171. if (psock) {
  172. if (sk->sk_prot->close != sock_map_close) {
  173. psock = ERR_PTR(-EBUSY);
  174. goto out;
  175. }
  176. if (!refcount_inc_not_zero(&psock->refcnt))
  177. psock = ERR_PTR(-EBUSY);
  178. }
  179. out:
  180. rcu_read_unlock();
  181. return psock;
  182. }
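/* Bind the map's attached programs to sk: take program references, attach
 * them to the socket's psock (allocating one if needed), switch sk_prot via
 * psock_update_sk_prot(), and start the strparser or verdict data_ready path.
 */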
  183. static int sock_map_link(struct bpf_map *map, struct sock *sk)
  184. {
  185. struct sk_psock_progs *progs = sock_map_progs(map);
  186. struct bpf_prog *stream_verdict = NULL;
  187. struct bpf_prog *stream_parser = NULL;
  188. struct bpf_prog *skb_verdict = NULL;
  189. struct bpf_prog *msg_parser = NULL;
  190. struct sk_psock *psock;
  191. int ret;
  192. stream_verdict = READ_ONCE(progs->stream_verdict);
  193. if (stream_verdict) {
  194. stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
  195. if (IS_ERR(stream_verdict))
  196. return PTR_ERR(stream_verdict);
  197. }
  198. stream_parser = READ_ONCE(progs->stream_parser);
  199. if (stream_parser) {
  200. stream_parser = bpf_prog_inc_not_zero(stream_parser);
  201. if (IS_ERR(stream_parser)) {
  202. ret = PTR_ERR(stream_parser);
  203. goto out_put_stream_verdict;
  204. }
  205. }
  206. msg_parser = READ_ONCE(progs->msg_parser);
  207. if (msg_parser) {
  208. msg_parser = bpf_prog_inc_not_zero(msg_parser);
  209. if (IS_ERR(msg_parser)) {
  210. ret = PTR_ERR(msg_parser);
  211. goto out_put_stream_parser;
  212. }
  213. }
  214. skb_verdict = READ_ONCE(progs->skb_verdict);
  215. if (skb_verdict) {
  216. skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
  217. if (IS_ERR(skb_verdict)) {
  218. ret = PTR_ERR(skb_verdict);
  219. goto out_put_msg_parser;
  220. }
  221. }
  222. psock = sock_map_psock_get_checked(sk);
  223. if (IS_ERR(psock)) {
  224. ret = PTR_ERR(psock);
  225. goto out_progs;
  226. }
  227. if (psock) {
  228. if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
  229. (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
  230. (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
  231. (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
  232. (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
  233. (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
  234. sk_psock_put(sk, psock);
  235. ret = -EBUSY;
  236. goto out_progs;
  237. }
  238. } else {
  239. psock = sk_psock_init(sk, map->numa_node);
  240. if (IS_ERR(psock)) {
  241. ret = PTR_ERR(psock);
  242. goto out_progs;
  243. }
  244. }
  245. if (msg_parser)
  246. psock_set_prog(&psock->progs.msg_parser, msg_parser);
  247. if (stream_parser)
  248. psock_set_prog(&psock->progs.stream_parser, stream_parser);
  249. if (stream_verdict)
  250. psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
  251. if (skb_verdict)
  252. psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
  253. /* msg_* and stream_* program references are tracked in psock after this
  254. * point. Reference dec and cleanup will occur through the psock destructor.
  255. */
  256. ret = sock_map_init_proto(sk, psock);
  257. if (ret < 0) {
  258. sk_psock_put(sk, psock);
  259. goto out;
  260. }
  261. write_lock_bh(&sk->sk_callback_lock);
  262. if (stream_parser && stream_verdict && !psock->saved_data_ready) {
  263. if (sk_is_tcp(sk))
  264. ret = sk_psock_init_strp(sk, psock);
  265. else
  266. ret = -EOPNOTSUPP;
  267. if (ret) {
  268. write_unlock_bh(&sk->sk_callback_lock);
  269. sk_psock_put(sk, psock);
  270. goto out;
  271. }
  272. sk_psock_start_strp(sk, psock);
  273. } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
  274. sk_psock_start_verdict(sk, psock);
  275. } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
  276. sk_psock_start_verdict(sk, psock);
  277. }
  278. write_unlock_bh(&sk->sk_callback_lock);
  279. return 0;
  280. out_progs:
  281. if (skb_verdict)
  282. bpf_prog_put(skb_verdict);
  283. out_put_msg_parser:
  284. if (msg_parser)
  285. bpf_prog_put(msg_parser);
  286. out_put_stream_parser:
  287. if (stream_parser)
  288. bpf_prog_put(stream_parser);
  289. out_put_stream_verdict:
  290. if (stream_verdict)
  291. bpf_prog_put(stream_verdict);
  292. out:
  293. return ret;
  294. }
  295. static void sock_map_free(struct bpf_map *map)
  296. {
  297. struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
  298. int i;
  299. /* After the sync no updates or deletes will be in-flight so it
  300. * is safe to walk map and remove entries without risking a race
  301. * in EEXIST update case.
  302. */
  303. synchronize_rcu();
  304. for (i = 0; i < stab->map.max_entries; i++) {
  305. struct sock **psk = &stab->sks[i];
  306. struct sock *sk;
  307. sk = xchg(psk, NULL);
  308. if (sk) {
  309. sock_hold(sk);
  310. lock_sock(sk);
  311. rcu_read_lock();
  312. sock_map_unref(sk, psk);
  313. rcu_read_unlock();
  314. release_sock(sk);
  315. sock_put(sk);
  316. }
  317. }
  318. /* wait for psock readers accessing its map link */
  319. synchronize_rcu();
  320. bpf_map_area_free(stab->sks);
  321. bpf_map_area_free(stab);
  322. }
  323. static void sock_map_release_progs(struct bpf_map *map)
  324. {
  325. psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
  326. }
  327. static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
  328. {
  329. struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
  330. WARN_ON_ONCE(!rcu_read_lock_held());
  331. if (unlikely(key >= map->max_entries))
  332. return NULL;
  333. return READ_ONCE(stab->sks[key]);
  334. }
  335. static void *sock_map_lookup(struct bpf_map *map, void *key)
  336. {
  337. struct sock *sk;
  338. sk = __sock_map_lookup_elem(map, *(u32 *)key);
  339. if (!sk)
  340. return NULL;
  341. if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
  342. return NULL;
  343. return sk;
  344. }
  345. static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
  346. {
  347. struct sock *sk;
  348. if (map->value_size != sizeof(u64))
  349. return ERR_PTR(-ENOSPC);
  350. sk = __sock_map_lookup_elem(map, *(u32 *)key);
  351. if (!sk)
  352. return ERR_PTR(-ENOENT);
  353. __sock_gen_cookie(sk);
  354. return &sk->sk_cookie;
  355. }
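/* Clear the slot at *psk if it still holds sk_test (or unconditionally when
 * sk_test is NULL) and drop the psock's link to this map slot.
 */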
  356. static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
  357. struct sock **psk)
  358. {
  359. struct sock *sk = NULL;
  360. int err = 0;
  361. spin_lock_bh(&stab->lock);
  362. if (!sk_test || sk_test == *psk)
  363. sk = xchg(psk, NULL);
  364. if (likely(sk))
  365. sock_map_unref(sk, psk);
  366. else
  367. err = -EINVAL;
  368. spin_unlock_bh(&stab->lock);
  369. return err;
  370. }
  371. static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
  372. void *link_raw)
  373. {
  374. struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
  375. __sock_map_delete(stab, sk, link_raw);
  376. }
  377. static long sock_map_delete_elem(struct bpf_map *map, void *key)
  378. {
  379. struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
  380. u32 i = *(u32 *)key;
  381. struct sock **psk;
  382. if (unlikely(i >= map->max_entries))
  383. return -EINVAL;
  384. psk = &stab->sks[i];
  385. return __sock_map_delete(stab, NULL, psk);
  386. }
  387. static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
  388. {
  389. struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
  390. u32 i = key ? *(u32 *)key : U32_MAX;
  391. u32 *key_next = next;
  392. if (i == stab->map.max_entries - 1)
  393. return -ENOENT;
  394. if (i >= stab->map.max_entries)
  395. *key_next = 0;
  396. else
  397. *key_next = i + 1;
  398. return 0;
  399. }
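/* Insert sk at index idx. The socket's psock is linked to the map slot so the
 * entry can be torn down from either side; BPF_NOEXIST/BPF_EXIST semantics
 * apply against any socket already stored at that index.
 */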
  400. static int sock_map_update_common(struct bpf_map *map, u32 idx,
  401. struct sock *sk, u64 flags)
  402. {
  403. struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
  404. struct sk_psock_link *link;
  405. struct sk_psock *psock;
  406. struct sock *osk;
  407. int ret;
  408. WARN_ON_ONCE(!rcu_read_lock_held());
  409. if (unlikely(flags > BPF_EXIST))
  410. return -EINVAL;
  411. if (unlikely(idx >= map->max_entries))
  412. return -E2BIG;
  413. link = sk_psock_init_link();
  414. if (!link)
  415. return -ENOMEM;
  416. ret = sock_map_link(map, sk);
  417. if (ret < 0)
  418. goto out_free;
  419. psock = sk_psock(sk);
  420. WARN_ON_ONCE(!psock);
  421. spin_lock_bh(&stab->lock);
  422. osk = stab->sks[idx];
  423. if (osk && flags == BPF_NOEXIST) {
  424. ret = -EEXIST;
  425. goto out_unlock;
  426. } else if (!osk && flags == BPF_EXIST) {
  427. ret = -ENOENT;
  428. goto out_unlock;
  429. }
  430. sock_map_add_link(psock, link, map, &stab->sks[idx]);
  431. stab->sks[idx] = sk;
  432. if (osk)
  433. sock_map_unref(osk, &stab->sks[idx]);
  434. spin_unlock_bh(&stab->lock);
  435. return 0;
  436. out_unlock:
  437. spin_unlock_bh(&stab->lock);
  438. if (psock)
  439. sk_psock_put(sk, psock);
  440. out_free:
  441. sk_psock_free_link(link);
  442. return ret;
  443. }
  444. static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
  445. {
  446. return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
  447. ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
  448. ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
  449. }
  450. static bool sock_map_redirect_allowed(const struct sock *sk)
  451. {
  452. if (sk_is_tcp(sk))
  453. return sk->sk_state != TCP_LISTEN;
  454. else
  455. return sk->sk_state == TCP_ESTABLISHED;
  456. }
  457. static bool sock_map_sk_is_suitable(const struct sock *sk)
  458. {
  459. return !!sk->sk_prot->psock_update_sk_prot;
  460. }
  461. static bool sock_map_sk_state_allowed(const struct sock *sk)
  462. {
  463. if (sk_is_tcp(sk))
  464. return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
  465. if (sk_is_stream_unix(sk))
  466. return (1 << sk->sk_state) & TCPF_ESTABLISHED;
  467. if (sk_is_vsock(sk) &&
  468. (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET))
  469. return (1 << sk->sk_state) & TCPF_ESTABLISHED;
  470. return true;
  471. }
  472. static int sock_hash_update_common(struct bpf_map *map, void *key,
  473. struct sock *sk, u64 flags);
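/* Syscall-side update: the value carries a socket file descriptor (u32 or
 * u64); resolve it and update the map while holding the socket lock.
 */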
  474. int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
  475. u64 flags)
  476. {
  477. struct socket *sock;
  478. struct sock *sk;
  479. int ret;
  480. u64 ufd;
  481. if (map->value_size == sizeof(u64))
  482. ufd = *(u64 *)value;
  483. else
  484. ufd = *(u32 *)value;
  485. if (ufd > S32_MAX)
  486. return -EINVAL;
  487. sock = sockfd_lookup(ufd, &ret);
  488. if (!sock)
  489. return ret;
  490. sk = sock->sk;
  491. if (!sk) {
  492. ret = -EINVAL;
  493. goto out;
  494. }
  495. if (!sock_map_sk_is_suitable(sk)) {
  496. ret = -EOPNOTSUPP;
  497. goto out;
  498. }
  499. sock_map_sk_acquire(sk);
  500. if (!sock_map_sk_state_allowed(sk))
  501. ret = -EOPNOTSUPP;
  502. else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
  503. ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
  504. else
  505. ret = sock_hash_update_common(map, key, sk, flags);
  506. sock_map_sk_release(sk);
  507. out:
  508. sockfd_put(sock);
  509. return ret;
  510. }
  511. static long sock_map_update_elem(struct bpf_map *map, void *key,
  512. void *value, u64 flags)
  513. {
  514. struct sock *sk = (struct sock *)value;
  515. int ret;
  516. if (unlikely(!sk || !sk_fullsock(sk)))
  517. return -EINVAL;
  518. if (!sock_map_sk_is_suitable(sk))
  519. return -EOPNOTSUPP;
  520. local_bh_disable();
  521. bh_lock_sock(sk);
  522. if (!sock_map_sk_state_allowed(sk))
  523. ret = -EOPNOTSUPP;
  524. else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
  525. ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
  526. else
  527. ret = sock_hash_update_common(map, key, sk, flags);
  528. bh_unlock_sock(sk);
  529. local_bh_enable();
  530. return ret;
  531. }
  532. BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
  533. struct bpf_map *, map, void *, key, u64, flags)
  534. {
  535. WARN_ON_ONCE(!rcu_read_lock_held());
  536. if (likely(sock_map_sk_is_suitable(sops->sk) &&
  537. sock_map_op_okay(sops)))
  538. return sock_map_update_common(map, *(u32 *)key, sops->sk,
  539. flags);
  540. return -EOPNOTSUPP;
  541. }
  542. const struct bpf_func_proto bpf_sock_map_update_proto = {
  543. .func = bpf_sock_map_update,
  544. .gpl_only = false,
  545. .pkt_access = true,
  546. .ret_type = RET_INTEGER,
  547. .arg1_type = ARG_PTR_TO_CTX,
  548. .arg2_type = ARG_CONST_MAP_PTR,
  549. .arg3_type = ARG_PTR_TO_MAP_KEY,
  550. .arg4_type = ARG_ANYTHING,
  551. };
  552. BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
  553. struct bpf_map *, map, u32, key, u64, flags)
  554. {
  555. struct sock *sk;
  556. if (unlikely(flags & ~(BPF_F_INGRESS)))
  557. return SK_DROP;
  558. sk = __sock_map_lookup_elem(map, key);
  559. if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
  560. return SK_DROP;
  561. if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
  562. return SK_DROP;
  563. skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
  564. return SK_PASS;
  565. }
  566. const struct bpf_func_proto bpf_sk_redirect_map_proto = {
  567. .func = bpf_sk_redirect_map,
  568. .gpl_only = false,
  569. .ret_type = RET_INTEGER,
  570. .arg1_type = ARG_PTR_TO_CTX,
  571. .arg2_type = ARG_CONST_MAP_PTR,
  572. .arg3_type = ARG_ANYTHING,
  573. .arg4_type = ARG_ANYTHING,
  574. };
  575. BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
  576. struct bpf_map *, map, u32, key, u64, flags)
  577. {
  578. struct sock *sk;
  579. if (unlikely(flags & ~(BPF_F_INGRESS)))
  580. return SK_DROP;
  581. sk = __sock_map_lookup_elem(map, key);
  582. if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
  583. return SK_DROP;
  584. if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
  585. return SK_DROP;
  586. if (sk_is_vsock(sk))
  587. return SK_DROP;
  588. msg->flags = flags;
  589. msg->sk_redir = sk;
  590. return SK_PASS;
  591. }
  592. const struct bpf_func_proto bpf_msg_redirect_map_proto = {
  593. .func = bpf_msg_redirect_map,
  594. .gpl_only = false,
  595. .ret_type = RET_INTEGER,
  596. .arg1_type = ARG_PTR_TO_CTX,
  597. .arg2_type = ARG_CONST_MAP_PTR,
  598. .arg3_type = ARG_ANYTHING,
  599. .arg4_type = ARG_ANYTHING,
  600. };
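/* bpf_iter support for walking sockmap entries. */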
  601. struct sock_map_seq_info {
  602. struct bpf_map *map;
  603. struct sock *sk;
  604. u32 index;
  605. };
  606. struct bpf_iter__sockmap {
  607. __bpf_md_ptr(struct bpf_iter_meta *, meta);
  608. __bpf_md_ptr(struct bpf_map *, map);
  609. __bpf_md_ptr(void *, key);
  610. __bpf_md_ptr(struct sock *, sk);
  611. };
  612. DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
  613. struct bpf_map *map, void *key,
  614. struct sock *sk)
  615. static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
  616. {
  617. if (unlikely(info->index >= info->map->max_entries))
  618. return NULL;
  619. info->sk = __sock_map_lookup_elem(info->map, info->index);
  620. /* can't return sk directly, since that might be NULL */
  621. return info;
  622. }
  623. static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
  624. __acquires(rcu)
  625. {
  626. struct sock_map_seq_info *info = seq->private;
  627. if (*pos == 0)
  628. ++*pos;
  629. /* pairs with sock_map_seq_stop */
  630. rcu_read_lock();
  631. return sock_map_seq_lookup_elem(info);
  632. }
  633. static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  634. __must_hold(rcu)
  635. {
  636. struct sock_map_seq_info *info = seq->private;
  637. ++*pos;
  638. ++info->index;
  639. return sock_map_seq_lookup_elem(info);
  640. }
  641. static int sock_map_seq_show(struct seq_file *seq, void *v)
  642. __must_hold(rcu)
  643. {
  644. struct sock_map_seq_info *info = seq->private;
  645. struct bpf_iter__sockmap ctx = {};
  646. struct bpf_iter_meta meta;
  647. struct bpf_prog *prog;
  648. meta.seq = seq;
  649. prog = bpf_iter_get_info(&meta, !v);
  650. if (!prog)
  651. return 0;
  652. ctx.meta = &meta;
  653. ctx.map = info->map;
  654. if (v) {
  655. ctx.key = &info->index;
  656. ctx.sk = info->sk;
  657. }
  658. return bpf_iter_run_prog(prog, &ctx);
  659. }
  660. static void sock_map_seq_stop(struct seq_file *seq, void *v)
  661. __releases(rcu)
  662. {
  663. if (!v)
  664. (void)sock_map_seq_show(seq, NULL);
  665. /* pairs with sock_map_seq_start */
  666. rcu_read_unlock();
  667. }
  668. static const struct seq_operations sock_map_seq_ops = {
  669. .start = sock_map_seq_start,
  670. .next = sock_map_seq_next,
  671. .stop = sock_map_seq_stop,
  672. .show = sock_map_seq_show,
  673. };
  674. static int sock_map_init_seq_private(void *priv_data,
  675. struct bpf_iter_aux_info *aux)
  676. {
  677. struct sock_map_seq_info *info = priv_data;
  678. bpf_map_inc_with_uref(aux->map);
  679. info->map = aux->map;
  680. return 0;
  681. }
  682. static void sock_map_fini_seq_private(void *priv_data)
  683. {
  684. struct sock_map_seq_info *info = priv_data;
  685. bpf_map_put_with_uref(info->map);
  686. }
  687. static u64 sock_map_mem_usage(const struct bpf_map *map)
  688. {
  689. u64 usage = sizeof(struct bpf_stab);
  690. usage += (u64)map->max_entries * sizeof(struct sock *);
  691. return usage;
  692. }
  693. static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
  694. .seq_ops = &sock_map_seq_ops,
  695. .init_seq_private = sock_map_init_seq_private,
  696. .fini_seq_private = sock_map_fini_seq_private,
  697. .seq_priv_size = sizeof(struct sock_map_seq_info),
  698. };
  699. BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
  700. const struct bpf_map_ops sock_map_ops = {
  701. .map_meta_equal = bpf_map_meta_equal,
  702. .map_alloc = sock_map_alloc,
  703. .map_free = sock_map_free,
  704. .map_get_next_key = sock_map_get_next_key,
  705. .map_lookup_elem_sys_only = sock_map_lookup_sys,
  706. .map_update_elem = sock_map_update_elem,
  707. .map_delete_elem = sock_map_delete_elem,
  708. .map_lookup_elem = sock_map_lookup,
  709. .map_release_uref = sock_map_release_progs,
  710. .map_check_btf = map_check_no_btf,
  711. .map_mem_usage = sock_map_mem_usage,
  712. .map_btf_id = &sock_map_btf_ids[0],
  713. .iter_seq_info = &sock_map_iter_seq_info,
  714. };
  715. struct bpf_shtab_elem {
  716. struct rcu_head rcu;
  717. u32 hash;
  718. struct sock *sk;
  719. struct hlist_node node;
  720. u8 key[];
  721. };
  722. struct bpf_shtab_bucket {
  723. struct hlist_head head;
  724. spinlock_t lock;
  725. };
  726. struct bpf_shtab {
  727. struct bpf_map map;
  728. struct bpf_shtab_bucket *buckets;
  729. u32 buckets_num;
  730. u32 elem_size;
  731. struct sk_psock_progs progs;
  732. atomic_t count;
  733. };
  734. static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
  735. {
  736. return jhash(key, len, 0);
  737. }
  738. static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
  739. u32 hash)
  740. {
  741. return &htab->buckets[hash & (htab->buckets_num - 1)];
  742. }
  743. static struct bpf_shtab_elem *
  744. sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
  745. u32 key_size)
  746. {
  747. struct bpf_shtab_elem *elem;
  748. hlist_for_each_entry_rcu(elem, head, node) {
  749. if (elem->hash == hash &&
  750. !memcmp(&elem->key, key, key_size))
  751. return elem;
  752. }
  753. return NULL;
  754. }
  755. static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
  756. {
  757. struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
  758. u32 key_size = map->key_size, hash;
  759. struct bpf_shtab_bucket *bucket;
  760. struct bpf_shtab_elem *elem;
  761. WARN_ON_ONCE(!rcu_read_lock_held());
  762. hash = sock_hash_bucket_hash(key, key_size);
  763. bucket = sock_hash_select_bucket(htab, hash);
  764. elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
  765. return elem ? elem->sk : NULL;
  766. }
  767. static void sock_hash_free_elem(struct bpf_shtab *htab,
  768. struct bpf_shtab_elem *elem)
  769. {
  770. atomic_dec(&htab->count);
  771. kfree_rcu(elem, rcu);
  772. }
  773. static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
  774. void *link_raw)
  775. {
  776. struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
  777. struct bpf_shtab_elem *elem_probe, *elem = link_raw;
  778. struct bpf_shtab_bucket *bucket;
  779. WARN_ON_ONCE(!rcu_read_lock_held());
  780. bucket = sock_hash_select_bucket(htab, elem->hash);
  781. /* elem may be deleted in parallel from the map, but access here
  782. * is okay since it's going away only after RCU grace period.
  783. * However, we need to check whether it's still present.
  784. */
  785. spin_lock_bh(&bucket->lock);
  786. elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
  787. elem->key, map->key_size);
  788. if (elem_probe && elem_probe == elem) {
  789. hlist_del_rcu(&elem->node);
  790. sock_map_unref(elem->sk, elem);
  791. sock_hash_free_elem(htab, elem);
  792. }
  793. spin_unlock_bh(&bucket->lock);
  794. }
  795. static long sock_hash_delete_elem(struct bpf_map *map, void *key)
  796. {
  797. struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
  798. u32 hash, key_size = map->key_size;
  799. struct bpf_shtab_bucket *bucket;
  800. struct bpf_shtab_elem *elem;
  801. int ret = -ENOENT;
  802. hash = sock_hash_bucket_hash(key, key_size);
  803. bucket = sock_hash_select_bucket(htab, hash);
  804. spin_lock_bh(&bucket->lock);
  805. elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
  806. if (elem) {
  807. hlist_del_rcu(&elem->node);
  808. sock_map_unref(elem->sk, elem);
  809. sock_hash_free_elem(htab, elem);
  810. ret = 0;
  811. }
  812. spin_unlock_bh(&bucket->lock);
  813. return ret;
  814. }
  815. static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
  816. void *key, u32 key_size,
  817. u32 hash, struct sock *sk,
  818. struct bpf_shtab_elem *old)
  819. {
  820. struct bpf_shtab_elem *new;
  821. if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
  822. if (!old) {
  823. atomic_dec(&htab->count);
  824. return ERR_PTR(-E2BIG);
  825. }
  826. }
  827. new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
  828. GFP_ATOMIC | __GFP_NOWARN,
  829. htab->map.numa_node);
  830. if (!new) {
  831. atomic_dec(&htab->count);
  832. return ERR_PTR(-ENOMEM);
  833. }
  834. memcpy(new->key, key, key_size);
  835. new->sk = sk;
  836. new->hash = hash;
  837. return new;
  838. }
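/* Sockhash counterpart of sock_map_update_common(): insert sk under key into
 * its hash bucket, replacing an existing element subject to the update flags.
 */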
  839. static int sock_hash_update_common(struct bpf_map *map, void *key,
  840. struct sock *sk, u64 flags)
  841. {
  842. struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
  843. u32 key_size = map->key_size, hash;
  844. struct bpf_shtab_elem *elem, *elem_new;
  845. struct bpf_shtab_bucket *bucket;
  846. struct sk_psock_link *link;
  847. struct sk_psock *psock;
  848. int ret;
  849. WARN_ON_ONCE(!rcu_read_lock_held());
  850. if (unlikely(flags > BPF_EXIST))
  851. return -EINVAL;
  852. link = sk_psock_init_link();
  853. if (!link)
  854. return -ENOMEM;
  855. ret = sock_map_link(map, sk);
  856. if (ret < 0)
  857. goto out_free;
  858. psock = sk_psock(sk);
  859. WARN_ON_ONCE(!psock);
  860. hash = sock_hash_bucket_hash(key, key_size);
  861. bucket = sock_hash_select_bucket(htab, hash);
  862. spin_lock_bh(&bucket->lock);
  863. elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
  864. if (elem && flags == BPF_NOEXIST) {
  865. ret = -EEXIST;
  866. goto out_unlock;
  867. } else if (!elem && flags == BPF_EXIST) {
  868. ret = -ENOENT;
  869. goto out_unlock;
  870. }
  871. elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
  872. if (IS_ERR(elem_new)) {
  873. ret = PTR_ERR(elem_new);
  874. goto out_unlock;
  875. }
  876. sock_map_add_link(psock, link, map, elem_new);
  877. /* Add new element to the head of the list, so that
  878. * concurrent search will find it before old elem.
  879. */
  880. hlist_add_head_rcu(&elem_new->node, &bucket->head);
  881. if (elem) {
  882. hlist_del_rcu(&elem->node);
  883. sock_map_unref(elem->sk, elem);
  884. sock_hash_free_elem(htab, elem);
  885. }
  886. spin_unlock_bh(&bucket->lock);
  887. return 0;
  888. out_unlock:
  889. spin_unlock_bh(&bucket->lock);
  890. sk_psock_put(sk, psock);
  891. out_free:
  892. sk_psock_free_link(link);
  893. return ret;
  894. }
  895. static int sock_hash_get_next_key(struct bpf_map *map, void *key,
  896. void *key_next)
  897. {
  898. struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
  899. struct bpf_shtab_elem *elem, *elem_next;
  900. u32 hash, key_size = map->key_size;
  901. struct hlist_head *head;
  902. int i = 0;
  903. if (!key)
  904. goto find_first_elem;
  905. hash = sock_hash_bucket_hash(key, key_size);
  906. head = &sock_hash_select_bucket(htab, hash)->head;
  907. elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
  908. if (!elem)
  909. goto find_first_elem;
  910. elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
  911. struct bpf_shtab_elem, node);
  912. if (elem_next) {
  913. memcpy(key_next, elem_next->key, key_size);
  914. return 0;
  915. }
  916. i = hash & (htab->buckets_num - 1);
  917. i++;
  918. find_first_elem:
  919. for (; i < htab->buckets_num; i++) {
  920. head = &sock_hash_select_bucket(htab, i)->head;
  921. elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
  922. struct bpf_shtab_elem, node);
  923. if (elem_next) {
  924. memcpy(key_next, elem_next->key, key_size);
  925. return 0;
  926. }
  927. }
  928. return -ENOENT;
  929. }
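/* Allocate a BPF_MAP_TYPE_SOCKHASH: bucket count is max_entries rounded up to
 * a power of two, and keys may be up to MAX_BPF_STACK bytes.
 */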
  930. static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
  931. {
  932. struct bpf_shtab *htab;
  933. int i, err;
  934. if (attr->max_entries == 0 ||
  935. attr->key_size == 0 ||
  936. (attr->value_size != sizeof(u32) &&
  937. attr->value_size != sizeof(u64)) ||
  938. attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
  939. return ERR_PTR(-EINVAL);
  940. if (attr->key_size > MAX_BPF_STACK)
  941. return ERR_PTR(-E2BIG);
  942. htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
  943. if (!htab)
  944. return ERR_PTR(-ENOMEM);
  945. bpf_map_init_from_attr(&htab->map, attr);
  946. htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
  947. htab->elem_size = sizeof(struct bpf_shtab_elem) +
  948. round_up(htab->map.key_size, 8);
  949. if (htab->buckets_num == 0 ||
  950. htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
  951. err = -EINVAL;
  952. goto free_htab;
  953. }
  954. htab->buckets = bpf_map_area_alloc(htab->buckets_num *
  955. sizeof(struct bpf_shtab_bucket),
  956. htab->map.numa_node);
  957. if (!htab->buckets) {
  958. err = -ENOMEM;
  959. goto free_htab;
  960. }
  961. for (i = 0; i < htab->buckets_num; i++) {
  962. INIT_HLIST_HEAD(&htab->buckets[i].head);
  963. spin_lock_init(&htab->buckets[i].lock);
  964. }
  965. return &htab->map;
  966. free_htab:
  967. bpf_map_area_free(htab);
  968. return ERR_PTR(err);
  969. }
  970. static void sock_hash_free(struct bpf_map *map)
  971. {
  972. struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
  973. struct bpf_shtab_bucket *bucket;
  974. struct hlist_head unlink_list;
  975. struct bpf_shtab_elem *elem;
  976. struct hlist_node *node;
  977. int i;
  978. /* After the sync no updates or deletes will be in-flight so it
  979. * is safe to walk map and remove entries without risking a race
  980. * in EEXIST update case.
  981. */
  982. synchronize_rcu();
  983. for (i = 0; i < htab->buckets_num; i++) {
  984. bucket = sock_hash_select_bucket(htab, i);
  985. /* We are racing with sock_hash_delete_from_link to
  986. * enter the spin-lock critical section. Every socket on
  987. * the list is still linked to sockhash. Since link
  988. * exists, psock exists and holds a ref to socket. That
  989. * lets us grab a socket ref too.
  990. */
  991. spin_lock_bh(&bucket->lock);
  992. hlist_for_each_entry(elem, &bucket->head, node)
  993. sock_hold(elem->sk);
  994. hlist_move_list(&bucket->head, &unlink_list);
  995. spin_unlock_bh(&bucket->lock);
  996. /* Process removed entries out of atomic context to
  997. * block for socket lock before deleting the psock's
  998. * link to sockhash.
  999. */
  1000. hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
  1001. hlist_del(&elem->node);
  1002. lock_sock(elem->sk);
  1003. rcu_read_lock();
  1004. sock_map_unref(elem->sk, elem);
  1005. rcu_read_unlock();
  1006. release_sock(elem->sk);
  1007. sock_put(elem->sk);
  1008. sock_hash_free_elem(htab, elem);
  1009. }
  1010. cond_resched();
  1011. }
  1012. /* wait for psock readers accessing its map link */
  1013. synchronize_rcu();
  1014. bpf_map_area_free(htab->buckets);
  1015. bpf_map_area_free(htab);
  1016. }
  1017. static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
  1018. {
  1019. struct sock *sk;
  1020. if (map->value_size != sizeof(u64))
  1021. return ERR_PTR(-ENOSPC);
  1022. sk = __sock_hash_lookup_elem(map, key);
  1023. if (!sk)
  1024. return ERR_PTR(-ENOENT);
  1025. __sock_gen_cookie(sk);
  1026. return &sk->sk_cookie;
  1027. }
  1028. static void *sock_hash_lookup(struct bpf_map *map, void *key)
  1029. {
  1030. struct sock *sk;
  1031. sk = __sock_hash_lookup_elem(map, key);
  1032. if (!sk)
  1033. return NULL;
  1034. if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
  1035. return NULL;
  1036. return sk;
  1037. }
  1038. static void sock_hash_release_progs(struct bpf_map *map)
  1039. {
  1040. psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
  1041. }
  1042. BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
  1043. struct bpf_map *, map, void *, key, u64, flags)
  1044. {
  1045. WARN_ON_ONCE(!rcu_read_lock_held());
  1046. if (likely(sock_map_sk_is_suitable(sops->sk) &&
  1047. sock_map_op_okay(sops)))
  1048. return sock_hash_update_common(map, key, sops->sk, flags);
  1049. return -EOPNOTSUPP;
  1050. }
  1051. const struct bpf_func_proto bpf_sock_hash_update_proto = {
  1052. .func = bpf_sock_hash_update,
  1053. .gpl_only = false,
  1054. .pkt_access = true,
  1055. .ret_type = RET_INTEGER,
  1056. .arg1_type = ARG_PTR_TO_CTX,
  1057. .arg2_type = ARG_CONST_MAP_PTR,
  1058. .arg3_type = ARG_PTR_TO_MAP_KEY,
  1059. .arg4_type = ARG_ANYTHING,
  1060. };
  1061. BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
  1062. struct bpf_map *, map, void *, key, u64, flags)
  1063. {
  1064. struct sock *sk;
  1065. if (unlikely(flags & ~(BPF_F_INGRESS)))
  1066. return SK_DROP;
  1067. sk = __sock_hash_lookup_elem(map, key);
  1068. if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
  1069. return SK_DROP;
  1070. if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
  1071. return SK_DROP;
  1072. skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
  1073. return SK_PASS;
  1074. }
  1075. const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
  1076. .func = bpf_sk_redirect_hash,
  1077. .gpl_only = false,
  1078. .ret_type = RET_INTEGER,
  1079. .arg1_type = ARG_PTR_TO_CTX,
  1080. .arg2_type = ARG_CONST_MAP_PTR,
  1081. .arg3_type = ARG_PTR_TO_MAP_KEY,
  1082. .arg4_type = ARG_ANYTHING,
  1083. };
  1084. BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
  1085. struct bpf_map *, map, void *, key, u64, flags)
  1086. {
  1087. struct sock *sk;
  1088. if (unlikely(flags & ~(BPF_F_INGRESS)))
  1089. return SK_DROP;
  1090. sk = __sock_hash_lookup_elem(map, key);
  1091. if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
  1092. return SK_DROP;
  1093. if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
  1094. return SK_DROP;
  1095. if (sk_is_vsock(sk))
  1096. return SK_DROP;
  1097. msg->flags = flags;
  1098. msg->sk_redir = sk;
  1099. return SK_PASS;
  1100. }
  1101. const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
  1102. .func = bpf_msg_redirect_hash,
  1103. .gpl_only = false,
  1104. .ret_type = RET_INTEGER,
  1105. .arg1_type = ARG_PTR_TO_CTX,
  1106. .arg2_type = ARG_CONST_MAP_PTR,
  1107. .arg3_type = ARG_PTR_TO_MAP_KEY,
  1108. .arg4_type = ARG_ANYTHING,
  1109. };
  1110. struct sock_hash_seq_info {
  1111. struct bpf_map *map;
  1112. struct bpf_shtab *htab;
  1113. u32 bucket_id;
  1114. };
  1115. static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
  1116. struct bpf_shtab_elem *prev_elem)
  1117. {
  1118. const struct bpf_shtab *htab = info->htab;
  1119. struct bpf_shtab_bucket *bucket;
  1120. struct bpf_shtab_elem *elem;
  1121. struct hlist_node *node;
  1122. /* try to find next elem in the same bucket */
  1123. if (prev_elem) {
  1124. node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
  1125. elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
  1126. if (elem)
  1127. return elem;
  1128. /* no more elements, continue in the next bucket */
  1129. info->bucket_id++;
  1130. }
  1131. for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
  1132. bucket = &htab->buckets[info->bucket_id];
  1133. node = rcu_dereference(hlist_first_rcu(&bucket->head));
  1134. elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
  1135. if (elem)
  1136. return elem;
  1137. }
  1138. return NULL;
  1139. }
  1140. static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
  1141. __acquires(rcu)
  1142. {
  1143. struct sock_hash_seq_info *info = seq->private;
  1144. if (*pos == 0)
  1145. ++*pos;
  1146. /* pairs with sock_hash_seq_stop */
  1147. rcu_read_lock();
  1148. return sock_hash_seq_find_next(info, NULL);
  1149. }
  1150. static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  1151. __must_hold(rcu)
  1152. {
  1153. struct sock_hash_seq_info *info = seq->private;
  1154. ++*pos;
  1155. return sock_hash_seq_find_next(info, v);
  1156. }
  1157. static int sock_hash_seq_show(struct seq_file *seq, void *v)
  1158. __must_hold(rcu)
  1159. {
  1160. struct sock_hash_seq_info *info = seq->private;
  1161. struct bpf_iter__sockmap ctx = {};
  1162. struct bpf_shtab_elem *elem = v;
  1163. struct bpf_iter_meta meta;
  1164. struct bpf_prog *prog;
  1165. meta.seq = seq;
  1166. prog = bpf_iter_get_info(&meta, !elem);
  1167. if (!prog)
  1168. return 0;
  1169. ctx.meta = &meta;
  1170. ctx.map = info->map;
  1171. if (elem) {
  1172. ctx.key = elem->key;
  1173. ctx.sk = elem->sk;
  1174. }
  1175. return bpf_iter_run_prog(prog, &ctx);
  1176. }
  1177. static void sock_hash_seq_stop(struct seq_file *seq, void *v)
  1178. __releases(rcu)
  1179. {
  1180. if (!v)
  1181. (void)sock_hash_seq_show(seq, NULL);
  1182. /* pairs with sock_hash_seq_start */
  1183. rcu_read_unlock();
  1184. }
  1185. static const struct seq_operations sock_hash_seq_ops = {
  1186. .start = sock_hash_seq_start,
  1187. .next = sock_hash_seq_next,
  1188. .stop = sock_hash_seq_stop,
  1189. .show = sock_hash_seq_show,
  1190. };
  1191. static int sock_hash_init_seq_private(void *priv_data,
  1192. struct bpf_iter_aux_info *aux)
  1193. {
  1194. struct sock_hash_seq_info *info = priv_data;
  1195. bpf_map_inc_with_uref(aux->map);
  1196. info->map = aux->map;
  1197. info->htab = container_of(aux->map, struct bpf_shtab, map);
  1198. return 0;
  1199. }
  1200. static void sock_hash_fini_seq_private(void *priv_data)
  1201. {
  1202. struct sock_hash_seq_info *info = priv_data;
  1203. bpf_map_put_with_uref(info->map);
  1204. }
  1205. static u64 sock_hash_mem_usage(const struct bpf_map *map)
  1206. {
  1207. struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
  1208. u64 usage = sizeof(*htab);
  1209. usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
  1210. usage += atomic_read(&htab->count) * (u64)htab->elem_size;
  1211. return usage;
  1212. }
  1213. static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
  1214. .seq_ops = &sock_hash_seq_ops,
  1215. .init_seq_private = sock_hash_init_seq_private,
  1216. .fini_seq_private = sock_hash_fini_seq_private,
  1217. .seq_priv_size = sizeof(struct sock_hash_seq_info),
  1218. };
  1219. BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
  1220. const struct bpf_map_ops sock_hash_ops = {
  1221. .map_meta_equal = bpf_map_meta_equal,
  1222. .map_alloc = sock_hash_alloc,
  1223. .map_free = sock_hash_free,
  1224. .map_get_next_key = sock_hash_get_next_key,
  1225. .map_update_elem = sock_map_update_elem,
  1226. .map_delete_elem = sock_hash_delete_elem,
  1227. .map_lookup_elem = sock_hash_lookup,
  1228. .map_lookup_elem_sys_only = sock_hash_lookup_sys,
  1229. .map_release_uref = sock_hash_release_progs,
  1230. .map_check_btf = map_check_no_btf,
  1231. .map_mem_usage = sock_hash_mem_usage,
  1232. .map_btf_id = &sock_hash_map_btf_ids[0],
  1233. .iter_seq_info = &sock_hash_iter_seq_info,
  1234. };
  1235. static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
  1236. {
  1237. switch (map->map_type) {
  1238. case BPF_MAP_TYPE_SOCKMAP:
  1239. return &container_of(map, struct bpf_stab, map)->progs;
  1240. case BPF_MAP_TYPE_SOCKHASH:
  1241. return &container_of(map, struct bpf_shtab, map)->progs;
  1242. default:
  1243. break;
  1244. }
  1245. return NULL;
  1246. }
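/* Map an attach type to the corresponding program and link slots in the map's
 * sk_psock_progs. Attaching both stream_verdict and skb_verdict is rejected.
 */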
  1247. static int sock_map_prog_link_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
  1248. struct bpf_link ***plink, u32 which)
  1249. {
  1250. struct sk_psock_progs *progs = sock_map_progs(map);
  1251. struct bpf_prog **cur_pprog;
  1252. struct bpf_link **cur_plink;
  1253. if (!progs)
  1254. return -EOPNOTSUPP;
  1255. switch (which) {
  1256. case BPF_SK_MSG_VERDICT:
  1257. cur_pprog = &progs->msg_parser;
  1258. cur_plink = &progs->msg_parser_link;
  1259. break;
  1260. #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
  1261. case BPF_SK_SKB_STREAM_PARSER:
  1262. cur_pprog = &progs->stream_parser;
  1263. cur_plink = &progs->stream_parser_link;
  1264. break;
  1265. #endif
  1266. case BPF_SK_SKB_STREAM_VERDICT:
  1267. if (progs->skb_verdict)
  1268. return -EBUSY;
  1269. cur_pprog = &progs->stream_verdict;
  1270. cur_plink = &progs->stream_verdict_link;
  1271. break;
  1272. case BPF_SK_SKB_VERDICT:
  1273. if (progs->stream_verdict)
  1274. return -EBUSY;
  1275. cur_pprog = &progs->skb_verdict;
  1276. cur_plink = &progs->skb_verdict_link;
  1277. break;
  1278. default:
  1279. return -EOPNOTSUPP;
  1280. }
  1281. *pprog = cur_pprog;
  1282. if (plink)
  1283. *plink = cur_plink;
  1284. return 0;
  1285. }
  1286. /* Handle the following four cases:
  1287. * prog_attach: prog != NULL, old == NULL, link == NULL
  1288. * prog_detach: prog == NULL, old != NULL, link == NULL
  1289. * link_attach: prog != NULL, old == NULL, link != NULL
  1290. * link_detach: prog == NULL, old != NULL, link != NULL
  1291. */
  1292. static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
  1293. struct bpf_prog *old, struct bpf_link *link,
  1294. u32 which)
  1295. {
  1296. struct bpf_prog **pprog;
  1297. struct bpf_link **plink;
  1298. int ret;
  1299. ret = sock_map_prog_link_lookup(map, &pprog, &plink, which);
  1300. if (ret)
  1301. return ret;
  1302. /* for prog_attach/prog_detach/link_attach, return error if a bpf_link
  1303. * exists for that prog.
  1304. */
  1305. if ((!link || prog) && *plink)
  1306. return -EBUSY;
  1307. if (old) {
  1308. ret = psock_replace_prog(pprog, prog, old);
  1309. if (!ret)
  1310. *plink = NULL;
  1311. } else {
  1312. psock_set_prog(pprog, prog);
  1313. if (link)
  1314. *plink = link;
  1315. }
  1316. return ret;
  1317. }
  1318. int sock_map_bpf_prog_query(const union bpf_attr *attr,
  1319. union bpf_attr __user *uattr)
  1320. {
  1321. __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
  1322. u32 prog_cnt = 0, flags = 0;
  1323. struct bpf_prog **pprog;
  1324. struct bpf_prog *prog;
  1325. struct bpf_map *map;
  1326. u32 id = 0;
  1327. int ret;
  1328. if (attr->query.query_flags)
  1329. return -EINVAL;
  1330. CLASS(fd, f)(attr->target_fd);
  1331. map = __bpf_map_get(f);
  1332. if (IS_ERR(map))
  1333. return PTR_ERR(map);
  1334. rcu_read_lock();
  1335. ret = sock_map_prog_link_lookup(map, &pprog, NULL, attr->query.attach_type);
  1336. if (ret)
  1337. goto end;
  1338. prog = *pprog;
  1339. prog_cnt = !prog ? 0 : 1;
  1340. if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
  1341. goto end;
  1342. /* We do not hold the refcnt; the bpf prog may be released
  1343. * asynchronously, in which case the id would be set to 0.
  1344. */
  1345. id = data_race(prog->aux->id);
  1346. if (id == 0)
  1347. prog_cnt = 0;
  1348. end:
  1349. rcu_read_unlock();
  1350. if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
  1351. (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
  1352. copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
  1353. ret = -EFAULT;
  1354. return ret;
  1355. }
  1356. static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
  1357. {
  1358. switch (link->map->map_type) {
  1359. case BPF_MAP_TYPE_SOCKMAP:
  1360. return sock_map_delete_from_link(link->map, sk,
  1361. link->link_raw);
  1362. case BPF_MAP_TYPE_SOCKHASH:
  1363. return sock_hash_delete_from_link(link->map, sk,
  1364. link->link_raw);
  1365. default:
  1366. break;
  1367. }
  1368. }
  1369. static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
  1370. {
  1371. struct sk_psock_link *link;
  1372. while ((link = sk_psock_link_pop(psock))) {
  1373. sock_map_unlink(sk, link);
  1374. sk_psock_free_link(link);
  1375. }
  1376. }
  1377. void sock_map_unhash(struct sock *sk)
  1378. {
  1379. void (*saved_unhash)(struct sock *sk);
  1380. struct sk_psock *psock;
  1381. rcu_read_lock();
  1382. psock = sk_psock(sk);
  1383. if (unlikely(!psock)) {
  1384. rcu_read_unlock();
  1385. saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
  1386. } else {
  1387. saved_unhash = psock->saved_unhash;
  1388. sock_map_remove_links(sk, psock);
  1389. rcu_read_unlock();
  1390. }
  1391. if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
  1392. return;
  1393. if (saved_unhash)
  1394. saved_unhash(sk);
  1395. }
  1396. EXPORT_SYMBOL_GPL(sock_map_unhash);
  1397. void sock_map_destroy(struct sock *sk)
  1398. {
  1399. void (*saved_destroy)(struct sock *sk);
  1400. struct sk_psock *psock;
  1401. rcu_read_lock();
  1402. psock = sk_psock_get(sk);
  1403. if (unlikely(!psock)) {
  1404. rcu_read_unlock();
  1405. saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
  1406. } else {
  1407. saved_destroy = psock->saved_destroy;
  1408. sock_map_remove_links(sk, psock);
  1409. rcu_read_unlock();
  1410. sk_psock_stop(psock);
  1411. sk_psock_put(sk, psock);
  1412. }
  1413. if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
  1414. return;
  1415. if (saved_destroy)
  1416. saved_destroy(sk);
  1417. }
  1418. EXPORT_SYMBOL_GPL(sock_map_destroy);
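/* Replacement for sk_prot->close installed while a psock is attached: unlink
 * the socket from all maps, stop the psock, then call the original close.
 */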
  1419. void sock_map_close(struct sock *sk, long timeout)
  1420. {
  1421. void (*saved_close)(struct sock *sk, long timeout);
  1422. struct sk_psock *psock;
  1423. lock_sock(sk);
  1424. rcu_read_lock();
  1425. psock = sk_psock(sk);
  1426. if (likely(psock)) {
  1427. saved_close = psock->saved_close;
  1428. sock_map_remove_links(sk, psock);
  1429. psock = sk_psock_get(sk);
  1430. if (unlikely(!psock))
  1431. goto no_psock;
  1432. rcu_read_unlock();
  1433. sk_psock_stop(psock);
  1434. release_sock(sk);
  1435. cancel_delayed_work_sync(&psock->work);
  1436. sk_psock_put(sk, psock);
  1437. } else {
  1438. saved_close = READ_ONCE(sk->sk_prot)->close;
  1439. no_psock:
  1440. rcu_read_unlock();
  1441. release_sock(sk);
  1442. }
  1443. /* Make sure we do not recurse. This is a bug.
  1444. * Leak the socket instead of crashing on a stack overflow.
  1445. */
  1446. if (WARN_ON_ONCE(saved_close == sock_map_close))
  1447. return;
  1448. saved_close(sk, timeout);
  1449. }
  1450. EXPORT_SYMBOL_GPL(sock_map_close);
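/* bpf_link state for a program attached to a sockmap/sockhash; the link holds
 * a uref on the map until it is released or detached.
 */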
struct sockmap_link {
        struct bpf_link link;
        struct bpf_map *map;
        enum bpf_attach_type attach_type;
};

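/* Link release: detach the link's program from the map slot it occupies
 * and drop the map's user reference. Clearing ->map makes the operation
 * idempotent, so a later release after an explicit detach is a no-op.
 */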
static void sock_map_link_release(struct bpf_link *link)
{
        struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);

        mutex_lock(&sockmap_mutex);
        if (!sockmap_link->map)
                goto out;

        WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link,
                                          sockmap_link->attach_type));

        bpf_map_put_with_uref(sockmap_link->map);
        sockmap_link->map = NULL;
out:
        mutex_unlock(&sockmap_mutex);
}

static int sock_map_link_detach(struct bpf_link *link)
{
        sock_map_link_release(link);
        return 0;
}

static void sock_map_link_dealloc(struct bpf_link *link)
{
        kfree(link);
}

/* Handle the following two cases:
 * case 1: link != NULL, prog != NULL, old != NULL
 * case 2: link != NULL, prog != NULL, old == NULL
 */
static int sock_map_link_update_prog(struct bpf_link *link,
                                     struct bpf_prog *prog,
                                     struct bpf_prog *old)
{
        const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
        struct bpf_prog **pprog, *old_link_prog;
        struct bpf_link **plink;
        int ret = 0;

        mutex_lock(&sockmap_mutex);

        /* If old prog is not NULL, ensure old prog is the same as link->prog. */
        if (old && link->prog != old) {
                ret = -EPERM;
                goto out;
        }
        /* Ensure link->prog has the same type/attach_type as the new prog. */
        if (link->prog->type != prog->type ||
            link->prog->expected_attach_type != prog->expected_attach_type) {
                ret = -EINVAL;
                goto out;
        }
        if (!sockmap_link->map) {
                ret = -ENOLINK;
                goto out;
        }

        ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
                                        sockmap_link->attach_type);
        if (ret)
                goto out;

        /* return error if the stored bpf_link does not match the incoming bpf_link. */
        if (link != *plink) {
                ret = -EBUSY;
                goto out;
        }

        if (old) {
                ret = psock_replace_prog(pprog, prog, old);
                if (ret)
                        goto out;
        } else {
                psock_set_prog(pprog, prog);
        }

        bpf_prog_inc(prog);
        old_link_prog = xchg(&link->prog, prog);
        bpf_prog_put(old_link_prog);

out:
        mutex_unlock(&sockmap_mutex);
        return ret;
}

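/* Read the target map's id under sockmap_mutex; returns 0 once the link
 * has been detached from its map. Used by the fill_link_info and fdinfo
 * handlers below.
 */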
static u32 sock_map_link_get_map_id(const struct sockmap_link *sockmap_link)
{
        u32 map_id = 0;

        mutex_lock(&sockmap_mutex);
        if (sockmap_link->map)
                map_id = sockmap_link->map->id;
        mutex_unlock(&sockmap_mutex);
        return map_id;
}

static int sock_map_link_fill_info(const struct bpf_link *link,
                                   struct bpf_link_info *info)
{
        const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
        u32 map_id = sock_map_link_get_map_id(sockmap_link);

        info->sockmap.map_id = map_id;
        info->sockmap.attach_type = sockmap_link->attach_type;
        return 0;
}

static void sock_map_link_show_fdinfo(const struct bpf_link *link,
                                      struct seq_file *seq)
{
        const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link);
        u32 map_id = sock_map_link_get_map_id(sockmap_link);

        seq_printf(seq, "map_id:\t%u\n", map_id);
        seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type);
}

static const struct bpf_link_ops sock_map_link_ops = {
        .release = sock_map_link_release,
        .dealloc = sock_map_link_dealloc,
        .detach = sock_map_link_detach,
        .update_prog = sock_map_link_update_prog,
        .fill_link_info = sock_map_link_fill_info,
        .show_fdinfo = sock_map_link_show_fdinfo,
};

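/* bpf(BPF_LINK_CREATE) handler for sockmap/sockhash targets: validate the
 * target map, allocate and prime the link, install @prog in the requested
 * attach slot and return the new link fd to userspace.
 *
 * Illustrative userspace counterpart (assumed libbpf usage, not part of
 * this file; prog_fd and map_fd are placeholders):
 *
 *      link_fd = bpf_link_create(prog_fd, map_fd,
 *                                BPF_SK_SKB_STREAM_VERDICT, NULL);
 */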
int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_link_primer link_primer;
        struct sockmap_link *sockmap_link;
        enum bpf_attach_type attach_type;
        struct bpf_map *map;
        int ret;

        if (attr->link_create.flags)
                return -EINVAL;

        map = bpf_map_get_with_uref(attr->link_create.target_fd);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) {
                ret = -EINVAL;
                goto out;
        }

        sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER);
        if (!sockmap_link) {
                ret = -ENOMEM;
                goto out;
        }

        attach_type = attr->link_create.attach_type;
        bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog);
        sockmap_link->map = map;
        sockmap_link->attach_type = attach_type;

        ret = bpf_link_prime(&sockmap_link->link, &link_primer);
        if (ret) {
                kfree(sockmap_link);
                goto out;
        }

        mutex_lock(&sockmap_mutex);
        ret = sock_map_prog_update(map, prog, NULL, &sockmap_link->link, attach_type);
        mutex_unlock(&sockmap_mutex);
        if (ret) {
                bpf_link_cleanup(&link_primer);
                goto out;
        }

        /* Increase refcnt for the prog since when old prog is replaced with
         * psock_replace_prog() and psock_set_prog() its refcnt will be decreased.
         *
         * Actually, we do not need to increase refcnt for the prog since bpf_link
         * will hold a reference. But in order to have less complexity w.r.t.
         * replacing/setting prog, let us increase the refcnt to make things simpler.
         */
        bpf_prog_inc(prog);

        return bpf_link_settle(&link_primer);

out:
        bpf_map_put_with_uref(map);
        return ret;
}

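/* Attach target for the "sockmap" bpf_iter: take a uref on the map behind
 * linfo->map.map_fd, verify it is a sockmap/sockhash and reject iterator
 * programs whose read-only key accesses exceed the map's key size.
 */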
static int sock_map_iter_attach_target(struct bpf_prog *prog,
                                       union bpf_iter_link_info *linfo,
                                       struct bpf_iter_aux_info *aux)
{
        struct bpf_map *map;
        int err = -EINVAL;

        if (!linfo->map.map_fd)
                return -EBADF;

        map = bpf_map_get_with_uref(linfo->map.map_fd);
        if (IS_ERR(map))
                return PTR_ERR(map);

        if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
            map->map_type != BPF_MAP_TYPE_SOCKHASH)
                goto put_map;

        if (prog->aux->max_rdonly_access > map->key_size) {
                err = -EACCES;
                goto put_map;
        }

        aux->map = map;
        return 0;

put_map:
        bpf_map_put_with_uref(map);
        return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
        bpf_map_put_with_uref(aux->map);
}

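/* Registration info for the sockmap iterator: iterator programs see a
 * read-only, possibly-NULL key buffer and a possibly-NULL socket pointer
 * as context arguments.
 */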
static struct bpf_iter_reg sock_map_iter_reg = {
        .target = "sockmap",
        .attach_target = sock_map_iter_attach_target,
        .detach_target = sock_map_iter_detach_target,
        .show_fdinfo = bpf_iter_map_show_fdinfo,
        .fill_link_info = bpf_iter_map_fill_link_info,
        .ctx_arg_info_size = 2,
        .ctx_arg_info = {
                { offsetof(struct bpf_iter__sockmap, key),
                  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
                { offsetof(struct bpf_iter__sockmap, sk),
                  PTR_TO_BTF_ID_OR_NULL },
        },
};

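/* Resolve the BTF id for the iterator's sock pointer argument and register
 * the "sockmap" iterator target at late_initcall time.
 */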
static int __init bpf_sockmap_iter_init(void)
{
        sock_map_iter_reg.ctx_arg_info[1].btf_id =
                btf_sock_ids[BTF_SOCK_TYPE_SOCK];
        return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);