/* memtrace.c */
  1. /*
  2. * Copyright (C) IBM Corporation, 2014, 2017
  3. * Anton Blanchard, Rashmica Gupta.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. */
  10. #define pr_fmt(fmt) "memtrace: " fmt
  11. #include <linux/bitops.h>
  12. #include <linux/string.h>
  13. #include <linux/memblock.h>
  14. #include <linux/init.h>
  15. #include <linux/moduleparam.h>
  16. #include <linux/fs.h>
  17. #include <linux/debugfs.h>
  18. #include <linux/slab.h>
  19. #include <linux/memory.h>
  20. #include <linux/memory_hotplug.h>
  21. #include <asm/machdep.h>
  22. #include <asm/debugfs.h>
  23. /* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;		/* ioremap()ed view of the removed range, read via debugfs */
	u64 start;		/* physical base address of the removed range */
	u64 size;		/* size of the removed range in bytes */
	u32 nid;		/* node the memory came from; set to -1 once re-added */
	struct dentry *dir;	/* per-node debugfs dir holding "trace"/"start"/"size" */
	char name[16];		/* debugfs dir name: the nid formatted as "%08x" */
};
/* Serializes concurrent writers of the "enable" debugfs file. */
static DEFINE_MUTEX(memtrace_mutex);
/* Per-node trace buffer size currently in effect; 0 when disabled. */
static u64 memtrace_size;
/* One entry per node that memory was successfully removed from. */
static struct memtrace_entry *memtrace_array;
/* Number of valid entries in memtrace_array. */
static unsigned int memtrace_array_nr;
  36. static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
  37. size_t count, loff_t *ppos)
  38. {
  39. struct memtrace_entry *ent = filp->private_data;
  40. return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
  41. }
/* File operations for each per-node "trace" debugfs file. */
static const struct file_operations memtrace_fops = {
	.llseek = default_llseek,
	.read	= memtrace_read,
	.open	= simple_open,	/* stashes the memtrace_entry in private_data */
};
  47. static int check_memblock_online(struct memory_block *mem, void *arg)
  48. {
  49. if (mem->state != MEM_ONLINE)
  50. return -1;
  51. return 0;
  52. }
  53. static int change_memblock_state(struct memory_block *mem, void *arg)
  54. {
  55. unsigned long state = (unsigned long)arg;
  56. mem->state = state;
  57. return 0;
  58. }
  59. static void memtrace_clear_range(unsigned long start_pfn,
  60. unsigned long nr_pages)
  61. {
  62. unsigned long pfn;
  63. /*
  64. * As pages are offline, we cannot trust the memmap anymore. As HIGHMEM
  65. * does not apply, avoid passing around "struct page" and use
  66. * clear_page() instead directly.
  67. */
  68. for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
  69. if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
  70. cond_resched();
  71. clear_page(__va(PFN_PHYS(pfn)));
  72. }
  73. }
/* called with device_hotplug_lock held */
static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
{
	u64 end_pfn = start_pfn + nr_pages - 1;

	/* Give up unless every memory block in the range is online. */
	if (walk_memory_range(start_pfn, end_pfn, NULL,
	    check_memblock_online))
		return false;

	/* Mark the blocks GOING_OFFLINE before attempting the offline. */
	walk_memory_range(start_pfn, end_pfn, (void *)MEM_GOING_OFFLINE,
			  change_memblock_state);

	if (offline_pages(start_pfn, nr_pages)) {
		/* Offlining failed: roll the block state back to ONLINE. */
		walk_memory_range(start_pfn, end_pfn, (void *)MEM_ONLINE,
				  change_memblock_state);
		return false;
	}

	walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
			  change_memblock_state);

	return true;
}
  92. static u64 memtrace_alloc_node(u32 nid, u64 size)
  93. {
  94. u64 start_pfn, end_pfn, nr_pages, pfn;
  95. u64 base_pfn;
  96. u64 bytes = memory_block_size_bytes();
  97. if (!node_spanned_pages(nid))
  98. return 0;
  99. start_pfn = node_start_pfn(nid);
  100. end_pfn = node_end_pfn(nid);
  101. nr_pages = size >> PAGE_SHIFT;
  102. /* Trace memory needs to be aligned to the size */
  103. end_pfn = round_down(end_pfn - nr_pages, nr_pages);
  104. lock_device_hotplug();
  105. for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
  106. if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
  107. /*
  108. * Clear the range while we still have a linear
  109. * mapping.
  110. */
  111. memtrace_clear_range(base_pfn, nr_pages);
  112. /*
  113. * Remove memory in memory block size chunks so that
  114. * iomem resources are always split to the same size and
  115. * we never try to remove memory that spans two iomem
  116. * resources.
  117. */
  118. end_pfn = base_pfn + nr_pages;
  119. for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) {
  120. __remove_memory(nid, pfn << PAGE_SHIFT, bytes);
  121. }
  122. unlock_device_hotplug();
  123. return base_pfn << PAGE_SHIFT;
  124. }
  125. }
  126. unlock_device_hotplug();
  127. return 0;
  128. }
  129. static int memtrace_init_regions_runtime(u64 size)
  130. {
  131. u32 nid;
  132. u64 m;
  133. memtrace_array = kcalloc(num_online_nodes(),
  134. sizeof(struct memtrace_entry), GFP_KERNEL);
  135. if (!memtrace_array) {
  136. pr_err("Failed to allocate memtrace_array\n");
  137. return -EINVAL;
  138. }
  139. for_each_online_node(nid) {
  140. m = memtrace_alloc_node(nid, size);
  141. /*
  142. * A node might not have any local memory, so warn but
  143. * continue on.
  144. */
  145. if (!m) {
  146. pr_err("Failed to allocate trace memory on node %d\n", nid);
  147. continue;
  148. }
  149. pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);
  150. memtrace_array[memtrace_array_nr].start = m;
  151. memtrace_array[memtrace_array_nr].size = size;
  152. memtrace_array[memtrace_array_nr].nid = nid;
  153. memtrace_array_nr++;
  154. }
  155. return 0;
  156. }
/* "memtrace" directory created under powerpc_debugfs_root. */
static struct dentry *memtrace_debugfs_dir;
  158. static int memtrace_init_debugfs(void)
  159. {
  160. int ret = 0;
  161. int i;
  162. for (i = 0; i < memtrace_array_nr; i++) {
  163. struct dentry *dir;
  164. struct memtrace_entry *ent = &memtrace_array[i];
  165. ent->mem = ioremap(ent->start, ent->size);
  166. /* Warn but continue on */
  167. if (!ent->mem) {
  168. pr_err("Failed to map trace memory at 0x%llx\n",
  169. ent->start);
  170. ret = -1;
  171. continue;
  172. }
  173. snprintf(ent->name, 16, "%08x", ent->nid);
  174. dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
  175. if (!dir) {
  176. pr_err("Failed to create debugfs directory for node %d\n",
  177. ent->nid);
  178. return -1;
  179. }
  180. ent->dir = dir;
  181. debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
  182. debugfs_create_x64("start", 0400, dir, &ent->start);
  183. debugfs_create_x64("size", 0400, dir, &ent->size);
  184. }
  185. return ret;
  186. }
  187. static int online_mem_block(struct memory_block *mem, void *arg)
  188. {
  189. return device_online(&mem->dev);
  190. }
  191. /*
  192. * Iterate through the chunks of memory we have removed from the kernel
  193. * and attempt to add them back to the kernel.
  194. */
  195. static int memtrace_online(void)
  196. {
  197. int i, ret = 0;
  198. struct memtrace_entry *ent;
  199. for (i = memtrace_array_nr - 1; i >= 0; i--) {
  200. ent = &memtrace_array[i];
  201. /* We have onlined this chunk previously */
  202. if (ent->nid == -1)
  203. continue;
  204. /* Remove from io mappings */
  205. if (ent->mem) {
  206. iounmap(ent->mem);
  207. ent->mem = 0;
  208. }
  209. if (add_memory(ent->nid, ent->start, ent->size)) {
  210. pr_err("Failed to add trace memory to node %d\n",
  211. ent->nid);
  212. ret += 1;
  213. continue;
  214. }
  215. /*
  216. * If kernel isn't compiled with the auto online option
  217. * we need to online the memory ourselves.
  218. */
  219. if (!memhp_auto_online) {
  220. lock_device_hotplug();
  221. walk_memory_range(PFN_DOWN(ent->start),
  222. PFN_UP(ent->start + ent->size - 1),
  223. NULL, online_mem_block);
  224. unlock_device_hotplug();
  225. }
  226. /*
  227. * Memory was added successfully so clean up references to it
  228. * so on reentry we can tell that this chunk was added.
  229. */
  230. debugfs_remove_recursive(ent->dir);
  231. pr_info("Added trace memory back to node %d\n", ent->nid);
  232. ent->size = ent->start = ent->nid = -1;
  233. }
  234. if (ret)
  235. return ret;
  236. /* If all chunks of memory were added successfully, reset globals */
  237. kfree(memtrace_array);
  238. memtrace_array = NULL;
  239. memtrace_size = 0;
  240. memtrace_array_nr = 0;
  241. return 0;
  242. }
/*
 * debugfs "enable" write handler. A non-zero, memory-block-aligned
 * value removes that much memory from each node; zero gives previously
 * removed memory back. Returns -EAGAIN if re-adding old memory fails.
 */
static int memtrace_enable_set(void *data, u64 val)
{
	int rc = -EAGAIN;
	u64 bytes;

	/*
	 * Don't attempt to do anything if size isn't aligned to a memory
	 * block or equal to zero.
	 */
	bytes = memory_block_size_bytes();
	if (val & (bytes - 1)) {
		pr_err("Value must be aligned with 0x%llx\n", bytes);
		return -EINVAL;
	}

	mutex_lock(&memtrace_mutex);

	/* Re-add/online previously removed/offlined memory */
	if (memtrace_size) {
		if (memtrace_online())
			goto out_unlock;
	}

	/* val == 0: everything was given back above, we are done. */
	if (!val) {
		rc = 0;
		goto out_unlock;
	}

	/* Offline and remove memory */
	if (memtrace_init_regions_runtime(val))
		goto out_unlock;

	if (memtrace_init_debugfs())
		goto out_unlock;

	memtrace_size = val;
	rc = 0;
out_unlock:
	mutex_unlock(&memtrace_mutex);
	return rc;
}
/* debugfs "enable" read handler: report the size currently in effect. */
static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

/* "enable" reads/writes a 64-bit hex value via the handlers above. */
DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
			memtrace_enable_set, "0x%016llx\n");
/*
 * Set up the debugfs interface (powerpc/memtrace/enable) at boot on
 * powernv. Returns -1 if the directory cannot be created.
 */
static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  powerpc_debugfs_root);
	if (!memtrace_debugfs_dir)
		return -1;

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);

	return 0;
}
machine_device_initcall(powernv, memtrace_init);