hugetlb_cgroup.c
/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

/* Use t->m[0] to encode the offset */
#define MEMFILE_OFFSET(t, m0)	(((offsetof(t, m0) << 16) | sizeof_field(t, m0)))
#define MEMFILE_OFFSET0(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_FIELD_SIZE(val)	((val) & 0xffff)

#define DFL_TMPL_SIZE		ARRAY_SIZE(hugetlb_dfl_tmpl)
#define LEGACY_TMPL_SIZE	ARRAY_SIZE(hugetlb_legacy_tmpl)
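
/*
 * Note: MEMFILE_PRIVATE() packs the hstate index into the upper 16 bits of
 * cftype->private and the RES_* attribute into the lower 16 bits;
 * MEMFILE_IDX()/MEMFILE_ATTR() unpack them again.  MEMFILE_OFFSET() plays the
 * same trick with a struct field's offset and size so that the per-hstate
 * file_offset values can be rebuilt in hugetlb_cgroup_cfttypes_init().
 */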

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
static struct cftype *dfl_files;
static struct cftype *legacy_files;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (page_counter_read(
			    hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
			return true;
	}
	return false;
}
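
/*
 * Set up the per-hstate fault and reservation page counters of a freshly
 * allocated cgroup, parenting each one to the matching counter of
 * parent_h_cgroup (NULL for the root), and cap both at PAGE_COUNTER_MAX
 * rounded down to a whole number of huge pages.
 */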
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent, false);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent, false);

		limit = round_down(PAGE_COUNTER_MAX,
				   pages_per_huge_page(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}

static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
	int node;

	for_each_node(node)
		kfree(h_cgroup->nodeinfo[node]);
	kfree(h_cgroup);
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int node;

	h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
			   GFP_KERNEL);

	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	/*
	 * TODO: this routine can waste much memory for nodes which will
	 * never be onlined. It's better to use memory hotplug callback
	 * function.
	 */
	for_each_node(node) {
		/* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
		int node_to_alloc =
			node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
		h_cgroup->nodeinfo[node] =
			kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
				     GFP_KERNEL, node_to_alloc);
		if (!h_cgroup->nodeinfo[node])
			goto fail_alloc_nodeinfo;
	}

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;

fail_alloc_nodeinfo:
	hugetlb_cgroup_free(h_cgroup);
	return ERR_PTR(-ENOMEM);
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from
 * the active list or uncharged from the cgroup, so there is no need to get
 * a page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
	struct folio *folio = page_folio(page);

	page_hcg = hugetlb_cgroup_from_folio(folio);
	/*
	 * We can have pages in active list without any cgroup
	 * ie, hugepage with less than 3 pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(folio, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;

	do {
		for_each_hstate(h) {
			spin_lock_irq(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);

			spin_unlock_irq(&hugetlb_lock);
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}
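
/*
 * Charge nr_pages huge pages of hstate idx to the hugetlb cgroup of the
 * current task.  On success the charged cgroup is returned through *ptr;
 * on failure a HUGETLB_MAX event is emitted and -ENOMEM is returned.
 * Reservation charges (rsvd == true) keep the css reference taken here,
 * since reservations are never reparented; it is dropped again on uncharge.
 */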
static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/* Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct folio *folio, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;
	lockdep_assert_held(&hugetlb_lock);
	__set_hugetlb_cgroup(folio, h_cg, rsvd);
	if (!rsvd) {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage + nr_pages);
	}
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct folio *folio)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct folio *folio)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					    struct folio *folio, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(folio, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
	else {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage - nr_pages);
	}
}

void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
				   struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
}

void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
					struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
		/*
		 * Only do css_put(rg->css) when we delete the entire region
		 * because one file_region must hold exactly one css reference.
		 */
		if (region_del)
			css_put(rg->css);
	}
}
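
/*
 * Resource attributes encoded into the low 16 bits of cftype->private via
 * MEMFILE_PRIVATE(); the "RSVD" variants refer to the reservation counters.
 */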
enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};
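
/*
 * Back the per-hstate "numa_stat" file.  On cgroup v1 a non-hierarchical
 * per-node breakdown is printed first; on both interfaces the hierarchical
 * totals then follow, with the per-node values summed over this cgroup and
 * all of its descendants.
 */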
static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
	int nid;
	struct cftype *cft = seq_cft(seq);
	int idx = MEMFILE_IDX(cft->private);
	bool legacy = !cgroup_subsys_on_dfl(hugetlb_cgrp_subsys);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
	struct cgroup_subsys_state *css;
	unsigned long usage;

	if (legacy) {
		/* Add up usage across all nodes for the non-hierarchical total. */
		usage = 0;
		for_each_node_state(nid, N_MEMORY)
			usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
		seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

		/* Simply print the per-node usage for the non-hierarchical total. */
		for_each_node_state(nid, N_MEMORY)
			seq_printf(seq, " N%d=%lu", nid,
				   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
					   PAGE_SIZE);
		seq_putc(seq, '\n');
	}

	/*
	 * The hierarchical total is pretty much the value recorded by the
	 * counter, so use that.
	 */
	seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
		   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

	/*
	 * For each node, traverse the css tree to obtain the hierarchical
	 * node usage.
	 */
	for_each_node_state(nid, N_MEMORY) {
		usage = 0;
		rcu_read_lock();
		css_for_each_descendant_pre(css, &h_cg->css) {
			usage += READ_ONCE(hugetlb_cgroup_from_css(css)
						   ->nodeinfo[nid]
						   ->usage[idx]);
		}
		rcu_read_unlock();

		seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
	}

	seq_putc(seq, '\n');

	return 0;
}

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);
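
/*
 * Common writer for the limit files.  The string "max" (default hierarchy) or
 * "-1" (legacy hierarchy) clears the limit; anything else is parsed as bytes,
 * rounded down to a whole number of huge pages, and applied to the fault or
 * reservation counter under hugetlb_limit_mutex.  The root cgroup's limits
 * cannot be changed.
 */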
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
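
/* Format a huge page size as a human-readable string such as "2MB" or "1GB". */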
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= SZ_1G)
		snprintf(buf, size, "%luGB", hsize / SZ_1G);
	else if (hsize >= SZ_1M)
		snprintf(buf, size, "%luMB", hsize / SZ_1M);
	else
		snprintf(buf, size, "%luKB", hsize / SZ_1K);
	return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

static struct cftype hugetlb_dfl_tmpl[] = {
	{
		.name = "max",
		.private = RES_LIMIT,
		.seq_show = hugetlb_cgroup_read_u64_max,
		.write = hugetlb_cgroup_write_dfl,
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{
		.name = "rsvd.max",
		.private = RES_RSVD_LIMIT,
		.seq_show = hugetlb_cgroup_read_u64_max,
		.write = hugetlb_cgroup_write_dfl,
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{
		.name = "current",
		.private = RES_USAGE,
		.seq_show = hugetlb_cgroup_read_u64_max,
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{
		.name = "rsvd.current",
		.private = RES_RSVD_USAGE,
		.seq_show = hugetlb_cgroup_read_u64_max,
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{
		.name = "events",
		.seq_show = hugetlb_events_show,
		.file_offset = MEMFILE_OFFSET(struct hugetlb_cgroup, events_file[0]),
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{
		.name = "events.local",
		.seq_show = hugetlb_events_local_show,
		.file_offset = MEMFILE_OFFSET(struct hugetlb_cgroup, events_local_file[0]),
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	{
		.name = "numa_stat",
		.seq_show = hugetlb_cgroup_read_numa_stat,
		.flags = CFTYPE_NOT_ON_ROOT,
	},
	/* don't need terminator here */
};

static struct cftype hugetlb_legacy_tmpl[] = {
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.read_u64 = hugetlb_cgroup_read_u64,
		.write = hugetlb_cgroup_write_legacy,
	},
	{
		.name = "rsvd.limit_in_bytes",
		.private = RES_RSVD_LIMIT,
		.read_u64 = hugetlb_cgroup_read_u64,
		.write = hugetlb_cgroup_write_legacy,
	},
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = hugetlb_cgroup_read_u64,
	},
	{
		.name = "rsvd.usage_in_bytes",
		.private = RES_RSVD_USAGE,
		.read_u64 = hugetlb_cgroup_read_u64,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.write = hugetlb_cgroup_reset,
		.read_u64 = hugetlb_cgroup_read_u64,
	},
	{
		.name = "rsvd.max_usage_in_bytes",
		.private = RES_RSVD_MAX_USAGE,
		.write = hugetlb_cgroup_reset,
		.read_u64 = hugetlb_cgroup_read_u64,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.write = hugetlb_cgroup_reset,
		.read_u64 = hugetlb_cgroup_read_u64,
	},
	{
		.name = "rsvd.failcnt",
		.private = RES_RSVD_FAILCNT,
		.write = hugetlb_cgroup_reset,
		.read_u64 = hugetlb_cgroup_read_u64,
	},
	{
		.name = "numa_stat",
		.seq_show = hugetlb_cgroup_read_numa_stat,
	},
	/* don't need terminator here */
};
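
/*
 * Stamp out one per-hstate copy of the template cftypes: prefix each name with
 * the formatted huge page size (e.g. "2MB.max"), pack the hstate index into
 * ->private, and shift any template file_offset to this hstate's slot in the
 * events_file[]/events_local_file[] arrays.
 */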
static void __init
hugetlb_cgroup_cfttypes_init(struct hstate *h, struct cftype *cft,
			     struct cftype *tmpl, int tmpl_size)
{
	char buf[32];
	int i, idx = hstate_index(h);

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	for (i = 0; i < tmpl_size; cft++, tmpl++, i++) {
		*cft = *tmpl;
		/* rebuild the name */
		snprintf(cft->name, MAX_CFTYPE_NAME, "%s.%s", buf, tmpl->name);
		/* rebuild the private */
		cft->private = MEMFILE_PRIVATE(idx, tmpl->private);
		/* rebuild the file_offset */
		if (tmpl->file_offset) {
			unsigned int offset = tmpl->file_offset;

			cft->file_offset = MEMFILE_OFFSET0(offset) +
					   MEMFILE_FIELD_SIZE(offset) * idx;
		}

		lockdep_register_key(&cft->lockdep_key);
	}
}

static void __init __hugetlb_cgroup_file_dfl_init(struct hstate *h)
{
	int idx = hstate_index(h);

	hugetlb_cgroup_cfttypes_init(h, dfl_files + idx * DFL_TMPL_SIZE,
				     hugetlb_dfl_tmpl, DFL_TMPL_SIZE);
}

static void __init __hugetlb_cgroup_file_legacy_init(struct hstate *h)
{
	int idx = hstate_index(h);

	hugetlb_cgroup_cfttypes_init(h, legacy_files + idx * LEGACY_TMPL_SIZE,
				     hugetlb_legacy_tmpl, LEGACY_TMPL_SIZE);
}

static void __init __hugetlb_cgroup_file_init(struct hstate *h)
{
	__hugetlb_cgroup_file_dfl_init(h);
	__hugetlb_cgroup_file_legacy_init(h);
}
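
/*
 * Allocate the runtime cftype arrays: one template-sized block per configured
 * hstate plus a single zeroed entry that acts as the array terminator
 * cgroup_add_*_cftypes() expects.
 */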
static void __init __hugetlb_cgroup_file_pre_init(void)
{
	int cft_count;

	cft_count = hugetlb_max_hstate * DFL_TMPL_SIZE + 1; /* add terminator */
	dfl_files = kcalloc(cft_count, sizeof(struct cftype), GFP_KERNEL);
	BUG_ON(!dfl_files);
	cft_count = hugetlb_max_hstate * LEGACY_TMPL_SIZE + 1; /* add terminator */
	legacy_files = kcalloc(cft_count, sizeof(struct cftype), GFP_KERNEL);
	BUG_ON(!legacy_files);
}

static void __init __hugetlb_cgroup_file_post_init(void)
{
	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       dfl_files));
	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  legacy_files));
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	__hugetlb_cgroup_file_pre_init();
	for_each_hstate(h)
		__hugetlb_cgroup_file_init(h);
	__hugetlb_cgroup_file_post_init();
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = folio_hstate(old_folio);

	if (hugetlb_cgroup_disabled())
		return;

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_folio(old_folio);
	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
	set_hugetlb_cgroup(old_folio, NULL);
	set_hugetlb_cgroup_rsvd(old_folio, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(new_folio, h_cg);
	set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
	list_move(&new_folio->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	return;
}
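
/*
 * The per-hstate files are built and registered at runtime by
 * hugetlb_cgroup_file_init(); this static array only provides the empty
 * terminator required by the subsystem declaration below.
 */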
static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};