// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype,
                                   bool skip_hwpoisoned_pages)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        /*
         * We assume the caller intended to SET the migratetype to isolate.
         * If it is already set, then someone else must have raced and
         * set it before us. Return -EBUSY.
         */
        if (is_migrate_isolate_page(page))
                goto out;

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or on the LRU, isolation can continue.
         * Later, for example, when the memory hotplug notifier runs,
         * the pages reported as "can be isolated" should be isolated
         * (freed) by the balloon driver through the memory notifier
         * chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
                                 skip_hwpoisoned_pages))
                ret = 0;

        /*
         * Here, "immobile" means pages that are not on the LRU. If there
         * are more immobile pages than removable-by-driver pages reported
         * by the notifier, we fail.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int mt = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
                                                NULL);

                __mod_zone_freepage_state(zone, -nr_pages, mt);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages(zone);
        return ret;
}
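
/*
 * Illustrative sketch, not part of the original file: the
 * MEM_ISOLATE_COUNT notification above is answered by balloon drivers
 * (e.g. the pseries CMM driver), which register a callback with
 * register_memory_isolate_notifier() and report, via arg->pages_found,
 * how many pages in the range they hold. Roughly:
 */
#if 0   /* example only; balloon_count_pages() is hypothetical */
static int balloon_isolate_cb(struct notifier_block *self,
                              unsigned long action, void *arg)
{
        struct memory_isolate_notify *marg = arg;

        if (action == MEM_ISOLATE_COUNT)
                marg->pages_found += balloon_count_pages(marg->start_pfn,
                                                         marg->nr_pages);
        return NOTIFY_OK;
}

static struct notifier_block balloon_isolate_nb = {
        .notifier_call = balloon_isolate_cb,
};
/* at init: register_memory_isolate_notifier(&balloon_isolate_nb); */
#endif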

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;
        bool isolated_page = false;
        unsigned int order;
        unsigned long pfn, buddy_pfn;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (!is_migrate_isolate_page(page))
                goto out;

        /*
         * Because a free page larger than pageblock_order on an isolated
         * pageblock is not allowed to merge (due to the freepage counting
         * problem), an unmerged free buddy page may exist here.
         * move_freepages_block() does not handle merging, so we need
         * another approach: isolating the page and freeing it again
         * causes these pages to merge.
         */
        if (PageBuddy(page)) {
                order = page_order(page);
                if (order >= pageblock_order) {
                        pfn = page_to_pfn(page);
                        buddy_pfn = __find_buddy_pfn(pfn, order);
                        buddy = page + (buddy_pfn - pfn);

                        if (pfn_valid_within(buddy_pfn) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                isolated_page = true;
                        }
                }
        }

        /*
         * If we isolated a free page of more than pageblock_order above,
         * there should be no other free pages in the range, so we can
         * skip the costly pageblock scan for moving free pages.
         */
        if (!isolated_page) {
                nr_pages = move_freepages_block(zone, page, migratetype, NULL);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
        }
        set_pageblock_migratetype(page, migratetype);
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isolated_page) {
                post_alloc_hook(page, order, __GFP_MOVABLE);
                __free_pages(page, order);
        }
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                if (!pfn_valid_within(pfn + i))
                        continue;
                page = pfn_to_online_page(pfn + i);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 * @migratetype:	Migrate type to set in error recovery.
 *
 * Setting the page-allocation-type to MIGRATE_ISOLATE means that free
 * pages in the range will never be allocated: neither the pages that are
 * currently free nor any pages freed into the range in the future.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Return 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 *
 * There is no high-level synchronization mechanism that prevents two
 * threads from trying to isolate overlapping ranges. If this happens, one
 * thread will notice pageblocks in the overlapping range that are already
 * set to isolate. This happens in set_migratetype_isolate(), which then
 * returns an error. We clean up by restoring the migratetype on any
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 * (See the usage sketch after the function body.)
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, bool skip_hwpoisoned_pages)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page &&
                    set_migratetype_isolate(page, migratetype,
                                            skip_hwpoisoned_pages)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages) {
                struct page *page = pfn_to_online_page(pfn);

                if (!page)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }

        return -EBUSY;
}
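
/*
 * Usage sketch, not part of the original file: callers such as
 * alloc_contig_range() and memory offlining pair the helpers in this
 * file roughly as below. The wrapper name and error handling here are
 * illustrative only.
 */
#if 0
static int claim_range_example(unsigned long start, unsigned long end)
{
        int ret;

        /* Mark every pageblock in [start, end) MIGRATE_ISOLATE. */
        ret = start_isolate_page_range(start, end, MIGRATE_MOVABLE, false);
        if (ret)
                return ret;

        /* ... migrate the remaining in-use pages out of the range ... */

        /* Fail if any page in the range is still not free. */
        if (test_pages_isolated(start, end, false))
                ret = -EBUSY;

        /* Always restore the original migratetype. */
        undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
        return ret;
}
#endif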

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || !is_migrate_isolate_page(page))
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone, and
 * zone->lock must be held by the caller.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
                         * If the page is on a free list, it has to be on
                         * the correct MIGRATE_ISOLATE freelist. There is no
                         * simple way to verify that as VM_BUG_ON(), though.
                         */
                        pfn += 1 << page_order(page);
                else if (skip_hwpoisoned_pages && PageHWPoison(page))
                        /* A HWPoisoned page cannot also be PageBuddy */
                        pfn++;
                else
                        break;
        }

        return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
         * need not be aligned to pageblock_nr_pages. We therefore check
         * the migratetype of each pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && !is_migrate_isolate_page(page))
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;

        /* Check that all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
                                                skip_hwpoisoned_pages);
        spin_unlock_irqrestore(&zone->lock, flags);

        trace_test_pages_isolated(start_pfn, end_pfn, pfn);

        return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
        return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}