memcontrol-v1.h 5.5 KB

  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. #ifndef __MM_MEMCONTROL_V1_H
  3. #define __MM_MEMCONTROL_V1_H
  4. #include <linux/cgroup-defs.h>
  5. /* Cgroup v1 and v2 common declarations */
  6. int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
  7. unsigned int nr_pages);
  8. static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
  9. unsigned int nr_pages)
  10. {
  11. if (mem_cgroup_is_root(memcg))
  12. return 0;
  13. return try_charge_memcg(memcg, gfp_mask, nr_pages);
  14. }
  15. void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
  16. void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
  17. /*
  18. * Iteration constructs for visiting all cgroups (under a tree). If
  19. * loops are exited prematurely (break), mem_cgroup_iter_break() must
  20. * be used for reference counting.
  21. */
  22. #define for_each_mem_cgroup_tree(iter, root) \
  23. for (iter = mem_cgroup_iter(root, NULL, NULL); \
  24. iter != NULL; \
  25. iter = mem_cgroup_iter(root, iter, NULL))
  26. #define for_each_mem_cgroup(iter) \
  27. for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
  28. iter != NULL; \
  29. iter = mem_cgroup_iter(NULL, iter, NULL))
  30. /* Whether legacy memory+swap accounting is active */
  31. static inline bool do_memsw_account(void)
  32. {
  33. return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
  34. }
  35. /*
  36. * Per memcg event counter is incremented at every pagein/pageout. With THP,
  37. * it will be incremented by the number of pages. This counter is used
  38. * to trigger some periodic events. This is straightforward and better
  39. * than using jiffies etc. to handle periodic memcg event.
  40. */
  41. enum mem_cgroup_events_target {
  42. MEM_CGROUP_TARGET_THRESH,
  43. MEM_CGROUP_TARGET_SOFTLIMIT,
  44. MEM_CGROUP_NTARGETS,
  45. };
  46. unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
  47. void drain_all_stock(struct mem_cgroup *root_memcg);
  48. unsigned long memcg_events(struct mem_cgroup *memcg, int event);
  49. unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
  50. unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
  51. unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
  52. unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
  53. int memory_stat_show(struct seq_file *m, void *v);
  54. /* Cgroup v1-specific declarations */
  55. #ifdef CONFIG_MEMCG_V1
  56. bool memcg1_alloc_events(struct mem_cgroup *memcg);
  57. void memcg1_free_events(struct mem_cgroup *memcg);
  58. void memcg1_memcg_init(struct mem_cgroup *memcg);
  59. void memcg1_remove_from_trees(struct mem_cgroup *memcg);
  60. static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
  61. {
  62. WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
  63. }
  64. bool memcg1_wait_acct_move(struct mem_cgroup *memcg);
  65. struct cgroup_taskset;
  66. int memcg1_can_attach(struct cgroup_taskset *tset);
  67. void memcg1_cancel_attach(struct cgroup_taskset *tset);
  68. void memcg1_move_task(void);
  69. void memcg1_css_offline(struct mem_cgroup *memcg);
  70. /* for encoding cft->private value on file */
  71. enum res_type {
  72. _MEM,
  73. _MEMSWAP,
  74. _KMEM,
  75. _TCP,
  76. };
  77. bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
  78. void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
  79. void memcg1_oom_recover(struct mem_cgroup *memcg);
  80. void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
  81. void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg);
  82. void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
  83. unsigned long nr_memory, int nid);
  84. void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
  85. void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages);
  86. static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg)
  87. {
  88. return memcg->tcpmem_active;
  89. }
  90. bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
  91. gfp_t gfp_mask);
  92. static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
  93. {
  94. page_counter_uncharge(&memcg->tcpmem, nr_pages);
  95. }
  96. extern struct cftype memsw_files[];
  97. extern struct cftype mem_cgroup_legacy_files[];
  98. #else /* CONFIG_MEMCG_V1 */
  99. static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
  100. static inline void memcg1_free_events(struct mem_cgroup *memcg) {}
  101. static inline void memcg1_memcg_init(struct mem_cgroup *memcg) {}
  102. static inline void memcg1_remove_from_trees(struct mem_cgroup *memcg) {}
  103. static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg) {}
  104. static inline bool memcg1_wait_acct_move(struct mem_cgroup *memcg) { return false; }
  105. static inline void memcg1_css_offline(struct mem_cgroup *memcg) {}
  106. static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) { return true; }
  107. static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
  108. static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
  109. static inline void memcg1_commit_charge(struct folio *folio,
  110. struct mem_cgroup *memcg) {}
  111. static inline void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) {}
  112. static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
  113. unsigned long pgpgout,
  114. unsigned long nr_memory, int nid) {}
  115. static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}
  116. static inline void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages) {}
  117. static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg) { return false; }
  118. static inline bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
  119. gfp_t gfp_mask) { return true; }
  120. static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) {}
  121. #endif /* CONFIG_MEMCG_V1 */
  122. #endif /* __MM_MEMCONTROL_V1_H */