devmem.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use (see the lifetime sketch below this struct).
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxq's this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;
};
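
/* Lifetime sketch (illustrative only, derived from the comment above and the
 * refcount helpers declared below; the call sites named here are assumptions,
 * not taken from the implementation):
 *
 *	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack); // user's netlink ref
 *	net_devmem_dmabuf_binding_get(binding);  // e.g. a page pool starts using it
 *	...
 *	net_devmem_dmabuf_binding_put(binding);  // that user of the binding is done
 *	net_devmem_unbind_dmabuf(binding);       // user drops the netlink ref
 *
 * The final put runs __net_devmem_dmabuf_binding_free() and unmaps the dmabuf.
 */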

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	/* Offset into the dma-buf where this chunk starts. */
	unsigned long base_virtual;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;

	/* Array of net_iovs for this chunk. */
	struct net_iov *niovs;
	/* Number of entries in @niovs. */
	size_t num_niovs;

	/* The binding this chunk belongs to. */
	struct net_devmem_dmabuf_binding *binding;
};
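
/* A rough picture of how the fields relate (an assumption based on the
 * comment above and on net_iov_virtual_addr() below, not part of the
 * declarations): a scatterlist entry covering @num_niovs pages becomes one
 * genpool chunk, and @niovs[i] describes the page at dma-buf offset
 * @base_virtual + (i << PAGE_SHIFT), presumably at DMA address
 * @base_dma_addr + (i << PAGE_SHIFT).
 */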

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void dev_dmabuf_uninstall(struct net_device *dev);

static inline struct dmabuf_genpool_chunk_owner *
net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}

static inline struct net_devmem_dmabuf_binding *
net_iov_binding(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
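
/* Worked example for net_iov_virtual_addr() above, with hypothetical values:
 * assuming 4 KiB pages (PAGE_SHIFT == 12), the net_iov at index 2 of a chunk
 * whose base_virtual is 0x200000 resolves to 0x200000 + (2 << 12) == 0x202000.
 */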

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding->id;
}

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline void dev_dmabuf_uninstall(struct net_device *dev)
{
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}
#endif

#endif /* _NET_DEVMEM_H */