iommu-pages.h 4.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2024, Google LLC.
  4. * Pasha Tatashin <pasha.tatashin@soleen.com>
  5. */
  6. #ifndef __IOMMU_PAGES_H
  7. #define __IOMMU_PAGES_H
  8. #include <linux/vmstat.h>
  9. #include <linux/gfp.h>
  10. #include <linux/mm.h>
  11. /*
 * All page allocations that should be reported as "iommu-pagetables" to
  13. * userspace must use one of the functions below. This includes allocations of
  14. * page-tables and other per-iommu_domain configuration structures.
  15. *
  16. * This is necessary for the proper accounting as IOMMU state can be rather
  17. * large, i.e. multiple gigabytes in size.
  18. */
  19. /**
  20. * __iommu_alloc_account - account for newly allocated page.
  21. * @page: head struct page of the page.
  22. * @order: order of the page
  23. */
  24. static inline void __iommu_alloc_account(struct page *page, int order)
  25. {
  26. const long pgcnt = 1l << order;
  27. mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
  28. mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
  29. }
  30. /**
  31. * __iommu_free_account - account a page that is about to be freed.
  32. * @page: head struct page of the page.
  33. * @order: order of the page
  34. */
  35. static inline void __iommu_free_account(struct page *page, int order)
  36. {
  37. const long pgcnt = 1l << order;
  38. mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
  39. mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
  40. }
  41. /**
  42. * __iommu_alloc_pages - allocate a zeroed page of a given order.
  43. * @gfp: buddy allocator flags
  44. * @order: page order
  45. *
  46. * returns the head struct page of the allocated page.
  47. */
  48. static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
  49. {
  50. struct page *page;
  51. page = alloc_pages(gfp | __GFP_ZERO, order);
  52. if (unlikely(!page))
  53. return NULL;
  54. __iommu_alloc_account(page, order);
  55. return page;
  56. }
  57. /**
  58. * __iommu_free_pages - free page of a given order
  59. * @page: head struct page of the page
  60. * @order: page order
  61. */
  62. static inline void __iommu_free_pages(struct page *page, int order)
  63. {
  64. if (!page)
  65. return;
  66. __iommu_free_account(page, order);
  67. __free_pages(page, order);
  68. }
  69. /**
  70. * iommu_alloc_pages_node - allocate a zeroed page of a given order from
  71. * specific NUMA node.
  72. * @nid: memory NUMA node id
  73. * @gfp: buddy allocator flags
  74. * @order: page order
  75. *
  76. * returns the virtual address of the allocated page
  77. */
  78. static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
  79. {
  80. struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
  81. if (unlikely(!page))
  82. return NULL;
  83. __iommu_alloc_account(page, order);
  84. return page_address(page);
  85. }
  86. /**
  87. * iommu_alloc_pages - allocate a zeroed page of a given order
  88. * @gfp: buddy allocator flags
  89. * @order: page order
  90. *
  91. * returns the virtual address of the allocated page
  92. */
  93. static inline void *iommu_alloc_pages(gfp_t gfp, int order)
  94. {
  95. struct page *page = __iommu_alloc_pages(gfp, order);
  96. if (unlikely(!page))
  97. return NULL;
  98. return page_address(page);
  99. }
  100. /**
  101. * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
  102. * @nid: memory NUMA node id
  103. * @gfp: buddy allocator flags
  104. *
  105. * returns the virtual address of the allocated page
  106. */
  107. static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
  108. {
  109. return iommu_alloc_pages_node(nid, gfp, 0);
  110. }
  111. /**
  112. * iommu_alloc_page - allocate a zeroed page
  113. * @gfp: buddy allocator flags
  114. *
  115. * returns the virtual address of the allocated page
  116. */
  117. static inline void *iommu_alloc_page(gfp_t gfp)
  118. {
  119. return iommu_alloc_pages(gfp, 0);
  120. }
  121. /**
  122. * iommu_free_pages - free page of a given order
  123. * @virt: virtual address of the page to be freed.
  124. * @order: page order
  125. */
  126. static inline void iommu_free_pages(void *virt, int order)
  127. {
  128. if (!virt)
  129. return;
  130. __iommu_free_pages(virt_to_page(virt), order);
  131. }
  132. /**
  133. * iommu_free_page - free page
  134. * @virt: virtual address of the page to be freed.
  135. */
  136. static inline void iommu_free_page(void *virt)
  137. {
  138. iommu_free_pages(virt, 0);
  139. }
  140. /**
  141. * iommu_put_pages_list - free a list of pages.
  142. * @page: the head of the lru list to be freed.
  143. *
  144. * There are no locking requirement for these pages, as they are going to be
  145. * put on a free list as soon as refcount reaches 0. Pages are put on this LRU
  146. * list once they are removed from the IOMMU page tables. However, they can
  147. * still be access through debugfs.
  148. */
  149. static inline void iommu_put_pages_list(struct list_head *page)
  150. {
  151. while (!list_empty(page)) {
  152. struct page *p = list_entry(page->prev, struct page, lru);
  153. list_del(&p->lru);
  154. __iommu_free_account(p, 0);
  155. put_page(p);
  156. }
  157. }
  158. #endif /* __IOMMU_PAGES_H */