ops-common.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Common Primitives for Data Access Monitoring
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

#include "ops-common.h"

/*
 * Get an online page for a pfn if it's in the LRU list. Otherwise, returns
 * NULL.
 *
 * The body of this function is stolen from the 'page_idle_get_folio()'. We
 * steal rather than reuse it because the code is quite simple.
 */
struct folio *damon_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}
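
/*
 * Clear the accessed bit of the PTE at @addr in @vma, notifying secondary
 * MMUs.  If the bit was set, mark the mapped folio young; the folio is then
 * marked idle for the page_idle mechanism.
 */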
void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte)));

	if (!folio)
		return;

	if (ptep_clear_young_notify(vma, addr, pte))
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
}
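
/*
 * PMD-level counterpart of damon_ptep_mkold(), for PMD-mapped transparent
 * huge pages.  Compiles to a no-op when CONFIG_TRANSPARENT_HUGEPAGE is not
 * enabled.
 */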
void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));

	if (!folio)
		return;

	if (pmdp_clear_young_notify(vma, addr, pmd))
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
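
/*
 * Subscores below are in [0, DAMON_MAX_SUBSCORE].  A region's age in seconds
 * is folded into (roughly) its base-2 logarithm, capped at
 * DAMON_MAX_AGE_IN_LOG.
 */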
#define DAMON_MAX_SUBSCORE	(100)
#define DAMON_MAX_AGE_IN_LOG	(32)
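
/*
 * Compute how hot region @r is, as a weighted average of its access frequency
 * and age subscores, weighted by the quota priority weights of scheme @s.
 * The result is in [0, DAMOS_MAX_SCORE].
 */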
int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
			struct damos *s)
{
	int freq_subscore;
	unsigned int age_in_sec;
	int age_in_log, age_subscore;
	unsigned int freq_weight = s->quota.weight_nr_accesses;
	unsigned int age_weight = s->quota.weight_age;
	int hotness;

	freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
		damon_max_nr_accesses(&c->attrs);

	age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
	for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
			age_in_log++, age_in_sec >>= 1)
		;

	/* If frequency is 0, higher age means it's colder */
	if (freq_subscore == 0)
		age_in_log *= -1;

	/*
	 * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG].
	 * Scale it to be in [0, 100] and set it as age subscore.
	 */
	age_in_log += DAMON_MAX_AGE_IN_LOG;
	age_subscore = age_in_log * DAMON_MAX_SUBSCORE /
		DAMON_MAX_AGE_IN_LOG / 2;

	hotness = (freq_weight * freq_subscore + age_weight * age_subscore);
	if (freq_weight + age_weight)
		hotness /= freq_weight + age_weight;

	/*
	 * Transform it to fit in [0, DAMOS_MAX_SCORE]
	 */
	hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;

	return hotness;
}
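
/*
 * A rough worked example of the arithmetic above, with illustrative numbers
 * that are not taken from the source:
 *
 *	damon_max_nr_accesses() == 10, r->nr_accesses == 5, age_in_sec == 20,
 *	weight_nr_accesses == weight_age == 1
 *
 *	freq_subscore = 5 * 100 / 10 = 50
 *	age_in_log    = 5   (20 can be halved five times before reaching 0)
 *	age_subscore  = (5 + 32) * 100 / 32 / 2 = 57   (integer division)
 *	hotness       = (1 * 50 + 1 * 57) / 2 = 53
 *	return value  = 53 * DAMOS_MAX_SCORE / 100
 */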
int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
			struct damos *s)
{
	int hotness = damon_hot_score(c, r, s);

	/* Return coldness of the region */
	return DAMOS_MAX_SCORE - hotness;
}
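
/*
 * Illustrative sketch, not part of this file: an operations set would
 * typically wire the scores above into its ->get_scheme_score callback,
 * picking a score based on the scheme's action.  This is loosely modeled on
 * mm/damon/paddr.c; the function name is made up and details may differ.
 *
 *	static int example_scheme_score(struct damon_ctx *ctx,
 *			struct damon_region *r, struct damos *s)
 *	{
 *		switch (s->action) {
 *		case DAMOS_PAGEOUT:
 *			return damon_cold_score(ctx, r, s);
 *		case DAMOS_LRU_PRIO:
 *			return damon_hot_score(ctx, r, s);
 *		default:
 *			return DAMOS_MAX_SCORE;
 *		}
 *	}
 */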