// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16

static struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}
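
/* True once the socket has both an RX ring and a umem with a fill queue,
 * i.e. everything an XDP program needs before it may redirect frames into
 * this socket via an XSKMAP entry.
 */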
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);
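
/* Copy-mode RX: take a free chunk address from the fill queue, copy the
 * frame into the umem and post a descriptor on the RX ring. Any failure
 * (no fill entries, frame too large, RX ring full) counts as a drop.
 */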
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *buffer;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data, len);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}
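
/* Same copy-mode RX as __xsk_rcv(), but used on the generic (skb) XDP
 * path: the descriptor is flushed and the socket woken immediately, since
 * there is no driver NAPI context to batch completions with.
 */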
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data, len);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xsk_flush(xs);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);
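
/* Driver-facing TX: pick the next descriptor from any socket sharing this
 * umem, lazily reserve a completion-queue slot for it, and hand the
 * frame's DMA address and length back to the driver. Returns false when
 * no socket has anything queued or the completion queue is full.
 */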
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
	struct xdp_desc desc;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, &desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
			goto out;

		*dma = xdp_umem_get_dma(umem, desc.addr);
		*len = desc.len;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}
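
/* Copy-mode TX: for each TX descriptor, allocate an skb, copy the frame
 * out of the umem and send it straight to the bound queue with
 * dev_direct_xmit(). Completion into the completion queue happens in
 * xsk_destruct_skb() once the skb is freed.
 */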
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}
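
/* Layer ring state on top of the usual datagram poll mask: readable when
 * the RX ring holds descriptors, writable while the TX ring has room.
 */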
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	if (xs->dev) {
		struct net_device *dev = xs->dev;

		/* Wait for driver to stop using the xdp socket. */
		xdp_del_sk_umem(xs->umem, xs);
		xs->dev = NULL;
		synchronize_net();
		dev_put(dev);
	}

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}
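
/* Bind the socket to a device/queue pair. Either the socket brings its
 * own umem (registered via XDP_UMEM_REG), or XDP_SHARED_UMEM makes it
 * inherit the umem of another socket that is already bound to the exact
 * same device and queue id.
 */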
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->dev) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if ((xs->rx && qid >= dev->real_num_rx_queues) ||
	    (xs->tx && qid >= dev->real_num_tx_queues)) {
		err = -EINVAL;
		goto out_unlock;
	}

	flags = sxdp->sxdp_flags;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, &xs->umem->props);
		xskq_set_umem(xs->umem->cq, &xs->umem->props);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, &xs->umem->props);
	xskq_set_umem(xs->tx, &xs->umem->props);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc = offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc = offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
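
/* Map one of the four rings into userspace. The page offset selects the
 * ring; the smp_rmb() calls pair with the smp_wmb() barriers that publish
 * the umem and queues, so a mapper never sees a half-initialized ring.
 */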
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->tx_completion_lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);