vaddr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->pid' should be the pointer to the relevant 'struct pid' having reference
 * count. Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;
	unsigned int i;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = damon_sz_region(r);
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end, i = 1; i < nr_pieces; start += sz_piece, i++) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find three regions separated by the two biggest unmapped regions
 *
 * mm		'mm_struct' of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of '__damon_va_init_regions()' below to know why this
 * is necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct mm_struct *mm,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range first_gap = {0}, second_gap = {0};
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	unsigned long start;

	/*
	 * Find the two biggest gaps so that first_gap > second_gap > others.
	 * If this is too slow, it can be optimised to examine the maple
	 * tree gaps.
	 */
	rcu_read_lock();
	for_each_vma(vmi, vma) {
		unsigned long gap;

		if (!prev) {
			start = vma->vm_start;
			goto next;
		}
		gap = vma->vm_start - prev->vm_end;

		if (gap > sz_range(&first_gap)) {
			second_gap = first_gap;
			first_gap.start = prev->vm_end;
			first_gap.end = vma->vm_start;
		} else if (gap > sz_range(&second_gap)) {
			second_gap.start = prev->vm_end;
			second_gap.end = vma->vm_start;
		}
next:
		prev = vma;
	}
	rcu_read_unlock();

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a few small portions of the entire address space are actually
 * mapped to memory and accessed, monitoring the unmapped regions is wasteful.
 * That said, because we can tolerate some noise, tracking every mapping is
 * not strictly required, and doing so could even incur high overhead if the
 * mappings change frequently or the number of mappings is high.  The adaptive
 * regions adjustment mechanism will further help to deal with the noise by
 * simply identifying the unmapped areas as regions that receive no access.
 * Moreover, applying the real mappings, which would have many unmapped areas
 * inside, would make the adaptive mechanism quite complex.  Nevertheless,
 * excessively large unmapped areas inside the monitoring targets should be
 * removed so that the adaptive mechanism does not waste time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  Also, the two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of processes is as below, the gap between the heap
 * and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack are the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in usual address spaces,
 * excluding only these two biggest unmapped regions is a sufficient
 * trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_set_regions(t, three_regions, 3);
	}
}

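/*
 * Page table walk callback for clearing the Accessed ('young') bit of the
 * entry that maps the sampling address, so that a later check can tell
 * whether the address was accessed in the meantime.
 */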
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pmd_t pmde;
	spinlock_t *ptl;

	if (pmd_trans_huge(pmdp_get(pmd))) {
		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_trans_huge(pmde)) {
			damon_pmdp_mkold(pmd, walk->vma, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	if (!pte_present(ptep_get(pte)))
		goto out;
	damon_ptep_mkold(pte, walk->vma, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(mm, addr, pte);
	struct folio *folio = pfn_folio(pte_pfn(entry));
	unsigned long psize = huge_page_size(hstate_vma(vma));

	folio_get(folio);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry, psize);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};

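/* Clear the Accessed bit of the page that maps @addr in @mm. */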
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct mm_struct *mm,
					struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(mm, r);
		mmput(mm);
	}
}

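/*
 * Structures and page table walk callbacks for checking whether the page
 * that maps the sampling address was accessed (made 'young') since the last
 * damon_va_mkold() on it.
 */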
struct damon_young_walk_private {
	/* size of the folio for the access checked virtual memory address */
	unsigned long *folio_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pte_t ptent;
	spinlock_t *ptl;
	struct folio *folio;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmdp_get(pmd))) {
		pmd_t pmde;

		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_trans_huge(pmde)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		folio = damon_get_folio(pmd_pfn(pmde));
		if (!folio)
			goto huge_out;
		if (pmd_young(pmde) || !folio_test_idle(folio) ||
		    mmu_notifier_test_young(walk->mm, addr))
			priv->young = true;
		*priv->folio_sz = HPAGE_PMD_SIZE;
		folio_put(folio);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	ptent = ptep_get(pte);
	if (!pte_present(ptent))
		goto out;
	folio = damon_get_folio(pte_pfn(ptent));
	if (!folio)
		goto out;
	if (pte_young(ptent) || !folio_test_idle(folio) ||
	    mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = folio_size(folio);
	folio_put(folio);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct folio *folio;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);
	if (!pte_present(entry))
		goto out;

	folio = pfn_folio(pte_pfn(entry));
	folio_get(folio);

	if (pte_young(entry) || !folio_test_idle(folio) ||
	    mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = huge_page_size(h);

	folio_put(folio);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};

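/*
 * Check whether the page that maps @addr in @mm was accessed since the last
 * damon_va_mkold() on it, and store the size of the mapping folio in
 * *@folio_sz.
 */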
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *folio_sz)
{
	struct damon_young_walk_private arg = {
		.folio_sz = folio_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
				struct damon_region *r, bool same_target,
				struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	if (!mm) {
		damon_update_region_access_rate(r, false, attrs);
		return;
	}

	/* If the region is in the last checked page, reuse the result */
	if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;
	bool same_target;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		same_target = false;
		damon_for_each_region(r, t) {
			__damon_va_check_access(mm, r, same_target,
					&ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
			same_target = true;
		}
		if (mm)
			mmput(mm);
	}
	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(struct damon_target *t)
{
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

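/*
 * Apply the madvise() @behavior to the address range of @r in @target's
 * address space.  Returns the number of bytes the hint was applied to, or
 * zero when madvise is unavailable or failed.
 */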
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(damon_sz_region(r));
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

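/*
 * Translate the DAMOS action of @scheme into the corresponding madvise()
 * hint and apply it to the region.  Returns the number of bytes the action
 * was applied to.
 */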
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		/*
		 * DAMOS actions that are not yet supported by 'vaddr'.
		 */
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_va_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_VADDR,
		.init = damon_va_init,
		.update = damon_va_update,
		.prepare_access_checks = damon_va_prepare_access_checks,
		.check_accesses = damon_va_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = damon_va_target_valid,
		.cleanup = NULL,
		.apply_scheme = damon_va_apply_scheme,
		.get_scheme_score = damon_va_scheme_score,
	};
	/* ops for fixed virtual address ranges */
	struct damon_operations ops_fvaddr = ops;
	int err;

	/* Don't set the monitoring target regions for the entire mapping */
	ops_fvaddr.id = DAMON_OPS_FVADDR;
	ops_fvaddr.init = NULL;
	ops_fvaddr.update = NULL;

	err = damon_register_ops(&ops);
	if (err)
		return err;
	return damon_register_ops(&ops_fvaddr);
}

subsys_initcall(damon_va_initcall);

#include "tests/vaddr-kunit.h"