  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * DAMON Debugfs Interface
  4. *
  5. * Author: SeongJae Park <sj@kernel.org>
  6. */
  7. #define pr_fmt(fmt) "damon-dbgfs: " fmt
  8. #include <linux/damon.h>
  9. #include <linux/debugfs.h>
  10. #include <linux/file.h>
  11. #include <linux/mm.h>
  12. #include <linux/module.h>
  13. #include <linux/page_idle.h>
  14. #include <linux/slab.h>
  15. #define DAMON_DBGFS_DEPRECATION_NOTICE \
  16. "DAMON debugfs interface is deprecated, so users should move " \
  17. "to DAMON_SYSFS. If you cannot, please report your usecase to " \
  18. "damon@lists.linux.dev and linux-mm@kvack.org.\n"
  19. static struct damon_ctx **dbgfs_ctxs;
  20. static int dbgfs_nr_ctxs;
  21. static struct dentry **dbgfs_dirs;
  22. static DEFINE_MUTEX(damon_dbgfs_lock);
/* Print the debugfs-interface deprecation notice once per boot, on first use. */
static void damon_dbgfs_warn_deprecation(void)
{
	pr_warn_once(DAMON_DBGFS_DEPRECATION_NOTICE);
}
/*
 * Copy a user write buffer into a freshly allocated, NUL-terminated kernel
 * string.
 *
 * Continued writes (*ppos != 0) are rejected, so each write(2) must deliver
 * the whole input at once.
 *
 * Returns the kernel buffer on success (caller must kfree() it), or an
 * ERR_PTR-encoded negative error code otherwise.
 */
static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;

	/* We do not accept continuous write */
	if (*ppos)
		return ERR_PTR(-EINVAL);

	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return ERR_PTR(-ENOMEM);

	ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
	if (ret != count) {
		kfree(kbuf);
		return ERR_PTR(-EIO);
	}
	kbuf[ret] = '\0';

	return kbuf;
}
  48. static ssize_t dbgfs_attrs_read(struct file *file,
  49. char __user *buf, size_t count, loff_t *ppos)
  50. {
  51. struct damon_ctx *ctx = file->private_data;
  52. char kbuf[128];
  53. int ret;
  54. mutex_lock(&ctx->kdamond_lock);
  55. ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
  56. ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
  57. ctx->attrs.ops_update_interval,
  58. ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
  59. mutex_unlock(&ctx->kdamond_lock);
  60. return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
  61. }
/*
 * Parse "sample_interval aggr_interval ops_update_interval min_nr_regions
 * max_nr_regions" from the user and apply it as the monitoring attributes.
 */
static ssize_t dbgfs_attrs_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	struct damon_attrs attrs;
	char *kbuf;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* All five fields are mandatory; anything else is a format error. */
	if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
				&attrs.sample_interval, &attrs.aggr_interval,
				&attrs.ops_update_interval,
				&attrs.min_nr_regions,
				&attrs.max_nr_regions) != 5) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	/* Attributes may not be changed while the kdamond is running. */
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = damon_set_attrs(ctx, &attrs);
	if (!ret)
		ret = count;
unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
out:
	kfree(kbuf);
	return ret;
}
/*
 * Returns non-empty string on success, negative error code otherwise.
 *
 * Return corresponding dbgfs' scheme action value (int) for the given
 * damos_action if the given damos_action value is valid and supported by
 * dbgfs, negative error code otherwise.
 *
 * Must stay in sync with dbgfs_scheme_action_to_damos_action().
 */
static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
{
	switch (action) {
	case DAMOS_WILLNEED:
		return 0;
	case DAMOS_COLD:
		return 1;
	case DAMOS_PAGEOUT:
		return 2;
	case DAMOS_HUGEPAGE:
		return 3;
	case DAMOS_NOHUGEPAGE:
		return 4;
	case DAMOS_STAT:
		return 5;
	default:
		/* Action with no debugfs encoding. */
		return -EINVAL;
	}
}
/*
 * Serialize all schemes of @c into @buf, one line per scheme.  The first 18
 * fields mirror the input format parsed by str_to_schemes(); the final five
 * fields are the scheme's runtime statistics.
 *
 * Returns the number of bytes written, or -ENOMEM when @buf (of size @len)
 * cannot hold another line.
 */
static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damos *s;
	int written = 0;
	int rc;

	damon_for_each_scheme(s, c) {
		rc = scnprintf(&buf[written], len - written,
				"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
				s->pattern.min_sz_region,
				s->pattern.max_sz_region,
				s->pattern.min_nr_accesses,
				s->pattern.max_nr_accesses,
				s->pattern.min_age_region,
				s->pattern.max_age_region,
				damos_action_to_dbgfs_scheme_action(s->action),
				s->quota.ms, s->quota.sz,
				s->quota.reset_interval,
				s->quota.weight_sz,
				s->quota.weight_nr_accesses,
				s->quota.weight_age,
				s->wmarks.metric, s->wmarks.interval,
				s->wmarks.high, s->wmarks.mid, s->wmarks.low,
				s->stat.nr_tried, s->stat.sz_tried,
				s->stat.nr_applied, s->stat.sz_applied,
				s->stat.qt_exceeds);
		/* scnprintf() returns 0 only when the buffer is exhausted. */
		if (!rc)
			return -ENOMEM;
		written += rc;
	}
	return written;
}
  149. static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
  150. size_t count, loff_t *ppos)
  151. {
  152. struct damon_ctx *ctx = file->private_data;
  153. char *kbuf;
  154. ssize_t len;
  155. kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
  156. if (!kbuf)
  157. return -ENOMEM;
  158. mutex_lock(&ctx->kdamond_lock);
  159. len = sprint_schemes(ctx, kbuf, count);
  160. mutex_unlock(&ctx->kdamond_lock);
  161. if (len < 0)
  162. goto out;
  163. len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
  164. out:
  165. kfree(kbuf);
  166. return len;
  167. }
  168. static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
  169. {
  170. ssize_t i;
  171. for (i = 0; i < nr_schemes; i++)
  172. kfree(schemes[i]);
  173. kfree(schemes);
  174. }
/*
 * Return corresponding damos_action for the given dbgfs input for a scheme
 * action if the input is valid, negative error code otherwise.
 *
 * Note the error is smuggled through the enum return type; callers check it
 * with a signed cast (see str_to_schemes()).  Must stay in sync with
 * damos_action_to_dbgfs_scheme_action().
 */
static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
{
	switch (dbgfs_action) {
	case 0:
		return DAMOS_WILLNEED;
	case 1:
		return DAMOS_COLD;
	case 2:
		return DAMOS_PAGEOUT;
	case 3:
		return DAMOS_HUGEPAGE;
	case 4:
		return DAMOS_NOHUGEPAGE;
	case 5:
		return DAMOS_STAT;
	default:
		/* Unknown debugfs action encoding. */
		return -EINVAL;
	}
}
/*
 * Converts a string into an array of struct damos pointers
 *
 * Expects one scheme per line in the 18-field format that sprint_schemes()
 * emits (without the trailing statistics fields).
 *
 * Returns an array of struct damos pointers that converted if the conversion
 * success, or NULL otherwise.  On success the caller owns both the array and
 * the schemes in it (see free_schemes_arr()).
 */
static struct damos **str_to_schemes(const char *str, ssize_t len,
				ssize_t *nr_schemes)
{
	struct damos *scheme, **schemes;
	const int max_nr_schemes = 256;
	int pos = 0, parsed, ret;
	unsigned int action_input;
	enum damos_action action;

	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
			GFP_KERNEL);
	if (!schemes)
		return NULL;

	*nr_schemes = 0;
	while (pos < len && *nr_schemes < max_nr_schemes) {
		struct damos_access_pattern pattern = {};
		struct damos_quota quota = {};
		struct damos_watermarks wmarks;

		ret = sscanf(&str[pos],
				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
				&pattern.min_sz_region, &pattern.max_sz_region,
				&pattern.min_nr_accesses,
				&pattern.max_nr_accesses,
				&pattern.min_age_region,
				&pattern.max_age_region,
				&action_input, &quota.ms,
				&quota.sz, &quota.reset_interval,
				&quota.weight_sz, &quota.weight_nr_accesses,
				&quota.weight_age, &wmarks.metric,
				&wmarks.interval, &wmarks.high, &wmarks.mid,
				&wmarks.low, &parsed);
		/* A short match means end of input, not a failure. */
		if (ret != 18)
			break;
		action = dbgfs_scheme_action_to_damos_action(action_input);
		/* Signed cast recovers the -EINVAL hidden in the enum. */
		if ((int)action < 0)
			goto fail;

		/* Reject inverted min/max access pattern bounds ... */
		if (pattern.min_sz_region > pattern.max_sz_region ||
		    pattern.min_nr_accesses > pattern.max_nr_accesses ||
		    pattern.min_age_region > pattern.max_age_region)
			goto fail;

		/* ... and non-monotonic (high >= mid >= low) watermarks. */
		if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
		    wmarks.mid < wmarks.low)
			goto fail;

		pos += parsed;
		scheme = damon_new_scheme(&pattern, action, 0, &quota,
				&wmarks, NUMA_NO_NODE);
		if (!scheme)
			goto fail;

		schemes[*nr_schemes] = scheme;
		*nr_schemes += 1;
	}
	return schemes;
fail:
	/* Destroy everything parsed so far; caller sees plain NULL. */
	free_schemes_arr(schemes, *nr_schemes);
	return NULL;
}
/*
 * Replace the schemes of the context with ones parsed from the user input.
 * Rejected with -EBUSY while the kdamond is running.
 */
static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	struct damos **schemes;
	ssize_t nr_schemes = 0, ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	schemes = str_to_schemes(kbuf, count, &nr_schemes);
	if (!schemes) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	damon_set_schemes(ctx, schemes, nr_schemes);
	ret = count;
	/*
	 * The context took ownership of the scheme objects; zeroing
	 * nr_schemes makes free_schemes_arr() below release only the
	 * pointer array itself.
	 */
	nr_schemes = 0;
unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	free_schemes_arr(schemes, nr_schemes);
out:
	kfree(kbuf);
	return ret;
}
/*
 * Write the target ids of @ctx into @buf as one space-separated line.  For
 * pid-based targets the pid numbers (as seen from the current namespace, via
 * pid_vnr()) are shown; otherwise a constant 42 per target.
 *
 * Returns the number of bytes written, or -ENOMEM if @buf is too small.
 */
static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
{
	struct damon_target *t;
	int id;
	int written = 0;
	int rc;

	damon_for_each_target(t, ctx) {
		if (damon_target_has_pid(ctx))
			/* Show pid numbers to debugfs users */
			id = pid_vnr(t->pid);
		else
			/* Show 42 for physical address space, just for fun */
			id = 42;

		rc = scnprintf(&buf[written], len - written, "%d ", id);
		if (!rc)
			return -ENOMEM;
		written += rc;
	}
	/* Overwrite the trailing separator space with the newline. */
	if (written)
		written -= 1;
	written += scnprintf(&buf[written], len - written, "\n");

	return written;
}
  312. static ssize_t dbgfs_target_ids_read(struct file *file,
  313. char __user *buf, size_t count, loff_t *ppos)
  314. {
  315. struct damon_ctx *ctx = file->private_data;
  316. ssize_t len;
  317. char ids_buf[320];
  318. mutex_lock(&ctx->kdamond_lock);
  319. len = sprint_target_ids(ctx, ids_buf, 320);
  320. mutex_unlock(&ctx->kdamond_lock);
  321. if (len < 0)
  322. return len;
  323. return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
  324. }
  325. /*
  326. * Converts a string into an integers array
  327. *
  328. * Returns an array of integers array if the conversion success, or NULL
  329. * otherwise.
  330. */
  331. static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
  332. {
  333. int *array;
  334. const int max_nr_ints = 32;
  335. int nr;
  336. int pos = 0, parsed, ret;
  337. *nr_ints = 0;
  338. array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
  339. if (!array)
  340. return NULL;
  341. while (*nr_ints < max_nr_ints && pos < len) {
  342. ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
  343. pos += parsed;
  344. if (ret != 1)
  345. break;
  346. array[*nr_ints] = nr;
  347. *nr_ints += 1;
  348. }
  349. return array;
  350. }
  351. static void dbgfs_put_pids(struct pid **pids, int nr_pids)
  352. {
  353. int i;
  354. for (i = 0; i < nr_pids; i++)
  355. put_pid(pids[i]);
  356. }
/*
 * Converts a string into an array of referenced struct pid pointers.
 *
 * Each parsed number is resolved with find_get_pid(), so every returned
 * entry holds a pid reference the caller must eventually drop (see
 * dbgfs_put_pids()).  If any number does not name a live pid, the whole
 * conversion is aborted and everything is cleaned up.
 *
 * Returns an array of struct pid pointers if the conversion success, or NULL
 * otherwise.
 */
static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
{
	int *ints;
	ssize_t nr_ints;
	struct pid **pids;

	*nr_pids = 0;

	ints = str_to_ints(str, len, &nr_ints);
	if (!ints)
		return NULL;

	pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
	if (!pids)
		goto out;
	for (; *nr_pids < nr_ints; (*nr_pids)++) {
		pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
		if (!pids[*nr_pids]) {
			/* Unknown pid: drop all references taken so far. */
			dbgfs_put_pids(pids, *nr_pids);
			kfree(ints);
			kfree(pids);
			return NULL;
		}
	}

out:
	/* The intermediate integer array is not needed any more. */
	kfree(ints);
	return pids;
}
/*
 * dbgfs_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @nr_targets:	number of targets
 * @pids:	array of target pids (size is same to @nr_targets)
 *
 * This function should not be called while the kdamond is running.  @pids is
 * ignored if the context is not configured to have pid in each target.  On
 * failure, reference counts of all pids in @pids are decremented.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
		struct pid **pids)
{
	ssize_t i;
	struct damon_target *t, *next;

	/* Remove the current targets first, dropping their pid references. */
	damon_for_each_target_safe(t, next, ctx) {
		if (damon_target_has_pid(ctx))
			put_pid(t->pid);
		damon_destroy_target(t);
	}

	for (i = 0; i < nr_targets; i++) {
		t = damon_new_target();
		if (!t) {
			/* Unwind the partially built target list ... */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			/* ... and drop every pid reference, as documented. */
			if (damon_target_has_pid(ctx))
				dbgfs_put_pids(pids, nr_targets);
			return -ENOMEM;
		}
		if (damon_target_has_pid(ctx))
			t->pid = pids[i];	/* target takes the reference */
		damon_add_target(ctx, t);
	}

	return 0;
}
  425. static ssize_t dbgfs_target_ids_write(struct file *file,
  426. const char __user *buf, size_t count, loff_t *ppos)
  427. {
  428. struct damon_ctx *ctx = file->private_data;
  429. bool id_is_pid = true;
  430. char *kbuf;
  431. struct pid **target_pids = NULL;
  432. ssize_t nr_targets;
  433. ssize_t ret;
  434. kbuf = user_input_str(buf, count, ppos);
  435. if (IS_ERR(kbuf))
  436. return PTR_ERR(kbuf);
  437. if (!strncmp(kbuf, "paddr\n", count)) {
  438. id_is_pid = false;
  439. nr_targets = 1;
  440. }
  441. if (id_is_pid) {
  442. target_pids = str_to_pids(kbuf, count, &nr_targets);
  443. if (!target_pids) {
  444. ret = -ENOMEM;
  445. goto out;
  446. }
  447. }
  448. mutex_lock(&ctx->kdamond_lock);
  449. if (ctx->kdamond) {
  450. if (id_is_pid)
  451. dbgfs_put_pids(target_pids, nr_targets);
  452. ret = -EBUSY;
  453. goto unlock_out;
  454. }
  455. /* remove previously set targets */
  456. dbgfs_set_targets(ctx, 0, NULL);
  457. if (!nr_targets) {
  458. ret = count;
  459. goto unlock_out;
  460. }
  461. /* Configure the context for the address space type */
  462. if (id_is_pid)
  463. ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
  464. else
  465. ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
  466. if (ret)
  467. goto unlock_out;
  468. ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
  469. if (!ret)
  470. ret = count;
  471. unlock_out:
  472. mutex_unlock(&ctx->kdamond_lock);
  473. kfree(target_pids);
  474. out:
  475. kfree(kbuf);
  476. return ret;
  477. }
  478. static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
  479. {
  480. struct damon_target *t;
  481. struct damon_region *r;
  482. int target_idx = 0;
  483. int written = 0;
  484. int rc;
  485. damon_for_each_target(t, c) {
  486. damon_for_each_region(r, t) {
  487. rc = scnprintf(&buf[written], len - written,
  488. "%d %lu %lu\n",
  489. target_idx, r->ar.start, r->ar.end);
  490. if (!rc)
  491. return -ENOMEM;
  492. written += rc;
  493. }
  494. target_idx++;
  495. }
  496. return written;
  497. }
  498. static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
  499. size_t count, loff_t *ppos)
  500. {
  501. struct damon_ctx *ctx = file->private_data;
  502. char *kbuf;
  503. ssize_t len;
  504. kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
  505. if (!kbuf)
  506. return -ENOMEM;
  507. mutex_lock(&ctx->kdamond_lock);
  508. if (ctx->kdamond) {
  509. mutex_unlock(&ctx->kdamond_lock);
  510. len = -EBUSY;
  511. goto out;
  512. }
  513. len = sprint_init_regions(ctx, kbuf, count);
  514. mutex_unlock(&ctx->kdamond_lock);
  515. if (len < 0)
  516. goto out;
  517. len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
  518. out:
  519. kfree(kbuf);
  520. return len;
  521. }
/*
 * Add the region [@ar->start, @ar->end) to the @target_idx-th target of @c.
 *
 * Regions are expected to be added in ascending address order: only overlap
 * with the immediately preceding region of the same target is checked.
 *
 * Returns 0 on success, -EINVAL for an empty range, unknown target index, or
 * overlapping range, and -ENOMEM on region allocation failure.
 */
static int add_init_region(struct damon_ctx *c, int target_idx,
		struct damon_addr_range *ar)
{
	struct damon_target *t;
	struct damon_region *r, *prev;
	unsigned long idx = 0;
	int rc = -EINVAL;

	if (ar->start >= ar->end)
		return -EINVAL;

	damon_for_each_target(t, c) {
		if (idx++ == target_idx) {
			r = damon_new_region(ar->start, ar->end);
			if (!r)
				return -ENOMEM;
			damon_add_region(r, t);
			if (damon_nr_regions(t) > 1) {
				/* Must not overlap the previous region. */
				prev = damon_prev_region(r);
				if (prev->ar.end > r->ar.start) {
					damon_destroy_region(r, t);
					return -EINVAL;
				}
			}
			rc = 0;
		}
	}
	return rc;
}
/*
 * Replace all monitoring regions of @c with ones parsed from @str, which
 * holds "<target idx> <start addr> <end addr>" triples.  On any validation
 * failure every region, including ones added by earlier triples, is
 * destroyed and the error code is returned.
 */
static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r, *next;
	int pos = 0, parsed, ret;
	int target_idx;
	struct damon_addr_range ar;
	int err;

	/* Start from a clean slate. */
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	while (pos < len) {
		ret = sscanf(&str[pos], "%d %lu %lu%n",
				&target_idx, &ar.start, &ar.end, &parsed);
		/* A short match means end of input. */
		if (ret != 3)
			break;
		err = add_init_region(c, target_idx, &ar);
		if (err)
			goto fail;
		pos += parsed;
	}

	return 0;

fail:
	/* All-or-nothing: drop everything added so far. */
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}
	return err;
}
/*
 * Handle writes to "init_regions": replace the initial monitoring regions
 * with the written triples.  Rejected with -EBUSY while the kdamond runs.
 */
static ssize_t dbgfs_init_regions_write(struct file *file,
					  const char __user *buf, size_t count,
					  loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t ret = count;
	int err;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	/* ret still holds count here, used as the input length. */
	err = set_init_regions(ctx, kbuf, ret);
	if (err)
		ret = err;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(kbuf);
	return ret;
}
  603. static ssize_t dbgfs_kdamond_pid_read(struct file *file,
  604. char __user *buf, size_t count, loff_t *ppos)
  605. {
  606. struct damon_ctx *ctx = file->private_data;
  607. char *kbuf;
  608. ssize_t len;
  609. kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
  610. if (!kbuf)
  611. return -ENOMEM;
  612. mutex_lock(&ctx->kdamond_lock);
  613. if (ctx->kdamond)
  614. len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
  615. else
  616. len = scnprintf(kbuf, count, "none\n");
  617. mutex_unlock(&ctx->kdamond_lock);
  618. if (!len)
  619. goto out;
  620. len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
  621. out:
  622. kfree(kbuf);
  623. return len;
  624. }
/*
 * Open callback for the per-context debugfs files: warn about deprecation,
 * stash the damon_ctx (stored as the inode's private data) for the
 * read/write handlers, and disallow seeking.
 */
static int damon_dbgfs_open(struct inode *inode, struct file *file)
{
	damon_dbgfs_warn_deprecation();

	file->private_data = inode->i_private;

	return nonseekable_open(inode, file);
}
/*
 * File operations of the per-context debugfs files.  All of them use
 * damon_dbgfs_open(), which passes the context to the handlers via
 * file->private_data.
 */
static const struct file_operations attrs_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_attrs_read,
	.write = dbgfs_attrs_write,
};

static const struct file_operations schemes_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_schemes_read,
	.write = dbgfs_schemes_write,
};

static const struct file_operations target_ids_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_target_ids_read,
	.write = dbgfs_target_ids_write,
};

static const struct file_operations init_regions_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_init_regions_read,
	.write = dbgfs_init_regions_write,
};

/* "kdamond_pid" is read-only. */
static const struct file_operations kdamond_pid_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_kdamond_pid_read,
};
/* Create the per-context control files for @ctx under @dir. */
static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
{
	const char * const file_names[] = {"attrs", "schemes", "target_ids",
		"init_regions", "kdamond_pid"};
	const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
		&target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
	int i;

	/* file_names[i] is served by fops[i]; keep the arrays in sync. */
	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
}
/*
 * Installed as ctx->callback.before_terminate by dbgfs_new_ctx().  For
 * pid-based contexts, drop the pid references and destroy the targets so
 * that terminating monitoring does not keep the target processes' pid
 * structures pinned.
 */
static void dbgfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	if (!damon_target_has_pid(ctx))
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}
/*
 * Allocate a monitoring context for debugfs use: prefer the virtual address
 * space operations, fall back to the physical ones, and install the
 * before_terminate callback so pid references get dropped on termination.
 *
 * Returns the context, or NULL on failure.
 */
static struct damon_ctx *dbgfs_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;

	/* Fail only if neither operations set is registered. */
	if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
			damon_select_ops(ctx, DAMON_OPS_PADDR)) {
		damon_destroy_ctx(ctx);
		return NULL;
	}
	ctx->callback.before_terminate = dbgfs_before_terminate;
	return ctx;
}
/* Counterpart of dbgfs_new_ctx(); currently just destroys the context. */
static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_ctx(ctx);
}
  695. static ssize_t damon_dbgfs_deprecated_read(struct file *file,
  696. char __user *buf, size_t count, loff_t *ppos)
  697. {
  698. static const char kbuf[512] = DAMON_DBGFS_DEPRECATION_NOTICE;
  699. return simple_read_from_buffer(buf, count, ppos, kbuf, strlen(kbuf));
  700. }
/*
 * Make a context of @name and create a debugfs directory for it.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int dbgfs_mk_context(char *name)
{
	struct dentry *root, **new_dirs, *new_dir;
	struct damon_ctx **new_ctxs, *new_ctx;

	/* The context set must not change while any kdamond is running. */
	if (damon_nr_running_ctxs())
		return -EBUSY;

	/*
	 * Grow the parallel ctxs/dirs arrays first.  When a krealloc()
	 * fails, the old content stays intact and dbgfs_nr_ctxs is
	 * unchanged, so no rollback is needed (at worst one array remains
	 * over-sized).
	 */
	new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_ctxs)
		return -ENOMEM;
	dbgfs_ctxs = new_ctxs;

	new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_dirs)
		return -ENOMEM;
	dbgfs_dirs = new_dirs;

	root = dbgfs_dirs[0];	/* the "damon" debugfs root directory */
	if (!root)
		return -ENOENT;

	new_dir = debugfs_create_dir(name, root);
	/* Below check is required for a potential duplicated name case */
	if (IS_ERR(new_dir))
		return PTR_ERR(new_dir);
	dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

	new_ctx = dbgfs_new_ctx();
	if (!new_ctx) {
		debugfs_remove(new_dir);
		dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
		return -ENOMEM;
	}

	dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
	dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
			dbgfs_ctxs[dbgfs_nr_ctxs]);
	dbgfs_nr_ctxs++;

	return 0;
}
/* Handle writes to "mk_contexts": create a context of the written name. */
static ssize_t dbgfs_mk_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	char *ctx_name;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	/* kbuf holds at most count chars, so ctx_name is large enough. */
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_mk_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}
/*
 * Remove a context of @name and its debugfs directory.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Return 0 on success, negative error code otherwise.
 */
static int dbgfs_rm_context(char *name)
{
	struct dentry *root, *dir, **new_dirs;
	struct inode *inode;
	struct damon_ctx **new_ctxs;
	int i, j;
	int ret = 0;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	root = dbgfs_dirs[0];	/* the "damon" debugfs root directory */
	if (!root)
		return -ENOENT;

	/* debugfs_lookup() takes a dentry reference; dput() on all exits. */
	dir = debugfs_lookup(name, root);
	if (!dir)
		return -ENOENT;

	/* Reject names of regular files such as "mk_contexts". */
	inode = d_inode(dir);
	if (!S_ISDIR(inode->i_mode)) {
		ret = -EINVAL;
		goto out_dput;
	}

	/* Build shrunken copies of the parallel dirs/ctxs arrays. */
	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
			GFP_KERNEL);
	if (!new_dirs) {
		ret = -ENOMEM;
		goto out_dput;
	}

	new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
			GFP_KERNEL);
	if (!new_ctxs) {
		ret = -ENOMEM;
		goto out_new_dirs;
	}

	/* Compact the arrays, destroying the removed context's slot. */
	for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
		if (dbgfs_dirs[i] == dir) {
			debugfs_remove(dbgfs_dirs[i]);
			dbgfs_destroy_ctx(dbgfs_ctxs[i]);
			continue;
		}
		new_dirs[j] = dbgfs_dirs[i];
		new_ctxs[j++] = dbgfs_ctxs[i];
	}

	kfree(dbgfs_dirs);
	kfree(dbgfs_ctxs);

	dbgfs_dirs = new_dirs;
	dbgfs_ctxs = new_ctxs;
	dbgfs_nr_ctxs--;

	goto out_dput;

out_new_dirs:
	kfree(new_dirs);
out_dput:
	dput(dir);
	return ret;
}
/* Handle writes to "rm_contexts": remove the context of the written name. */
static ssize_t dbgfs_rm_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;
	char *ctx_name;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	/* kbuf holds at most count chars, so ctx_name is large enough. */
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_rm_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}
  862. static ssize_t dbgfs_monitor_on_read(struct file *file,
  863. char __user *buf, size_t count, loff_t *ppos)
  864. {
  865. char monitor_on_buf[5];
  866. bool monitor_on = damon_nr_running_ctxs() != 0;
  867. int len;
  868. len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");
  869. return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
  870. }
  871. static ssize_t dbgfs_monitor_on_write(struct file *file,
  872. const char __user *buf, size_t count, loff_t *ppos)
  873. {
  874. ssize_t ret;
  875. char *kbuf;
  876. kbuf = user_input_str(buf, count, ppos);
  877. if (IS_ERR(kbuf))
  878. return PTR_ERR(kbuf);
  879. /* Remove white space */
  880. if (sscanf(kbuf, "%s", kbuf) != 1) {
  881. kfree(kbuf);
  882. return -EINVAL;
  883. }
  884. mutex_lock(&damon_dbgfs_lock);
  885. if (!strncmp(kbuf, "on", count)) {
  886. int i;
  887. for (i = 0; i < dbgfs_nr_ctxs; i++) {
  888. if (damon_targets_empty(dbgfs_ctxs[i])) {
  889. kfree(kbuf);
  890. mutex_unlock(&damon_dbgfs_lock);
  891. return -EINVAL;
  892. }
  893. }
  894. ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
  895. } else if (!strncmp(kbuf, "off", count)) {
  896. ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
  897. } else {
  898. ret = -EINVAL;
  899. }
  900. mutex_unlock(&damon_dbgfs_lock);
  901. if (!ret)
  902. ret = count;
  903. kfree(kbuf);
  904. return ret;
  905. }
/*
 * Open callback for the context-independent files: warn about deprecation
 * and disallow seeking; no private data is needed.
 */
static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file)
{
	damon_dbgfs_warn_deprecation();

	return nonseekable_open(inode, file);
}
/*
 * File operations of the root-level control files.  Note "DEPRECATED" has no
 * .open callback, so reading it does not trigger the one-shot deprecation
 * warning itself.
 */
static const struct file_operations deprecated_fops = {
	.read = damon_dbgfs_deprecated_read,
};

static const struct file_operations mk_contexts_fops = {
	.open = damon_dbgfs_static_file_open,
	.write = dbgfs_mk_context_write,
};

static const struct file_operations rm_contexts_fops = {
	.open = damon_dbgfs_static_file_open,
	.write = dbgfs_rm_context_write,
};

static const struct file_operations monitor_on_fops = {
	.open = damon_dbgfs_static_file_open,
	.read = dbgfs_monitor_on_read,
	.write = dbgfs_monitor_on_write,
};
/*
 * Create the "damon" debugfs root directory, populate it with the static
 * control files and the files of the initial context (dbgfs_ctxs[0]), and
 * remember the root as dbgfs_dirs[0].
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int __init __damon_dbgfs_init(void)
{
	struct dentry *dbgfs_root;
	const char * const file_names[] = {"mk_contexts", "rm_contexts",
		"monitor_on_DEPRECATED", "DEPRECATED"};
	const struct file_operations *fops[] = {&mk_contexts_fops,
		&rm_contexts_fops, &monitor_on_fops, &deprecated_fops};
	int i;

	dbgfs_root = debugfs_create_dir("damon", NULL);

	/* debugfs creation results are not checked, per debugfs convention. */
	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
				fops[i]);
	dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);

	dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
	if (!dbgfs_dirs) {
		/* Removing the root also removes all files created above. */
		debugfs_remove(dbgfs_root);
		return -ENOMEM;
	}
	dbgfs_dirs[0] = dbgfs_root;

	return 0;
}
  948. /*
  949. * Functions for the initialization
  950. */
  951. static int __init damon_dbgfs_init(void)
  952. {
  953. int rc = -ENOMEM;
  954. mutex_lock(&damon_dbgfs_lock);
  955. dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
  956. if (!dbgfs_ctxs)
  957. goto out;
  958. dbgfs_ctxs[0] = dbgfs_new_ctx();
  959. if (!dbgfs_ctxs[0]) {
  960. kfree(dbgfs_ctxs);
  961. goto out;
  962. }
  963. dbgfs_nr_ctxs = 1;
  964. rc = __damon_dbgfs_init();
  965. if (rc) {
  966. kfree(dbgfs_ctxs[0]);
  967. kfree(dbgfs_ctxs);
  968. pr_err("%s: dbgfs init failed\n", __func__);
  969. }
  970. out:
  971. mutex_unlock(&damon_dbgfs_lock);
  972. return rc;
  973. }
  974. module_init(damon_dbgfs_init);
  975. #include "tests/dbgfs-kunit.h"