// SPDX-License-Identifier: GPL-2.0
/*
 * fs/proc/kcore.c kernel ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 * ELF version written by David Howells <David.Howells@nexor.co.uk>
 * Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 * Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 * Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;
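
/*
 * kclist_head holds the kcore_list entries that become the PT_LOAD segments
 * of /proc/kcore; kclist_lock protects the list and the reported file size.
 * kcore_need_update starts at 1 and is set again by the memory hotplug
 * notifier below, so the RAM entries are (re)built lazily on open().
 */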

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (mem_pfn_is_ram)
                return -EBUSY;
        mem_pfn_is_ram = fn;
        return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
        if (mem_pfn_is_ram)
                return mem_pfn_is_ram(pfn);
        else
                return 1;
}
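
/*
 * Note: when no hook has been registered, pfn_is_ram() treats every pfn as
 * RAM, so read_kcore() will try to read it rather than zero-fill.
 */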

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
                       int type)
{
        new->addr = (unsigned long)addr;
        new->size = size;
        new->type = type;

        list_add_tail(&new->list, &kclist_head);
}
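
/*
 * Overall layout of /proc/kcore, as computed by get_kcore_size() below
 * (offsets from the start of the file):
 *
 *      0               ELF header
 *      phdrs_offset    one PT_NOTE phdr plus one PT_LOAD phdr per kclist entry
 *      notes_offset    PRSTATUS, PRPSINFO, TASKSTRUCT and VMCOREINFO notes
 *      data_offset     page-aligned; memory contents, placed so that a
 *                      virtual address v lands at
 *                      data_offset + kc_vaddr_to_offset(v)
 */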
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
                             size_t *data_offset)
{
        size_t try, size;
        struct kcore_list *m;

        *nphdr = 1; /* PT_NOTE */
        size = 0;

        list_for_each_entry(m, &kclist_head, list) {
                try = kc_vaddr_to_offset((size_t)m->addr + m->size);
                if (try > size)
                        size = try;
                *nphdr = *nphdr + 1;
        }

        *phdrs_len = *nphdr * sizeof(struct elf_phdr);
        *notes_len = (4 * sizeof(struct elf_note) +
                      3 * ALIGN(sizeof(CORE_STR), 4) +
                      VMCOREINFO_NOTE_NAME_BYTES +
                      ALIGN(sizeof(struct elf_prstatus), 4) +
                      ALIGN(sizeof(struct elf_prpsinfo), 4) +
                      ALIGN(arch_task_struct_size, 4) +
                      ALIGN(vmcoreinfo_size, 4));
        *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
                                  *notes_len);
        return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * With highmem, part of memory is _invisible_ to the kernel (not direct
 * mapped), so export only low memory and assume [0...max_low_pfn) is one
 * continuous range: its holes are not as big as in the !HIGHMEM case.
 */
static int kcore_ram_list(struct list_head *head)
{
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, head);
        return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
        unsigned long nr_pages = ent->size >> PAGE_SHIFT;
        unsigned long start, end;
        struct kcore_list *vmm, *tmp;

        start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
        end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
        end = PAGE_ALIGN(end);
        /* overlap check (because we had to align to page boundaries above) */
        list_for_each_entry(tmp, head, list) {
                if (tmp->type != KCORE_VMEMMAP)
                        continue;
                if (start < tmp->addr + tmp->size)
                        if (end > tmp->addr)
                                end = tmp->addr;
        }

        if (start < end) {
                vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
                if (!vmm)
                        return 0;
                vmm->addr = start;
                vmm->size = end - start;
                vmm->type = KCORE_VMEMMAP;
                list_add_tail(&vmm->list, head);
        }
        return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        return 1;
}
#endif
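
/*
 * walk_system_ram_range() callback: build a KCORE_RAM entry for each System
 * RAM range that has a valid direct mapping, trim it against the end of the
 * address space and the start of the vmalloc area, and register the vmemmap
 * range that describes it.  A non-zero return stops the walk, and
 * kcore_ram_list() then fails with -ENOMEM.
 */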
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;
        struct page *p;

        if (!pfn_valid(pfn))
                return 1;

        p = pfn_to_page(pfn);
        if (!memmap_valid_within(pfn, p, page_zone(p)))
                return 1;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;

        ent->addr = (unsigned long)page_to_virt(p);
        ent->size = nr_pages << PAGE_SHIFT;

        if (!virt_addr_valid(ent->addr))
                goto free_out;

        /* Trim any area that is not direct-mapped (adapted from the ppc-32 code). */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /*
         * We've already checked virt_addr_valid so we know this address
         * is a valid pointer, therefore we can check against it to determine
         * if we need to trim
         */
        if (VMALLOC_START > ent->addr) {
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }

        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);

        if (!get_sparsemem_vmemmap_info(ent, head)) {
                list_del(&ent->list);
                goto free_out;
        }

        return 0;
free_out:
        kfree(ent);
        return 1;
}

static int kcore_ram_list(struct list_head *list)
{
        int nid, ret;
        unsigned long end_pfn;

        /* Not initialized yet....update now */
        /* find out "max pfn" */
        end_pfn = 0;
        for_each_node_state(nid, N_MEMORY) {
                unsigned long node_end;

                node_end = node_end_pfn(nid);
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }

        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
        if (ret)
                return -ENOMEM;

        return 0;
}
#endif /* CONFIG_HIGHMEM */
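
/*
 * Rebuild the KCORE_RAM (and KCORE_VMEMMAP) part of the kclist and refresh
 * the size reported for /proc/kcore.  Runs when kcore_need_update is set:
 * once from proc_kcore_init() and again from open() after memory hotplug.
 * Replaced entries are freed only after kclist_lock has been dropped.
 */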
static int kcore_update_ram(void)
{
        LIST_HEAD(list);
        LIST_HEAD(garbage);
        int nphdr;
        size_t phdrs_len, notes_len, data_offset;
        struct kcore_list *tmp, *pos;
        int ret = 0;

        down_write(&kclist_lock);
        if (!xchg(&kcore_need_update, 0))
                goto out;

        ret = kcore_ram_list(&list);
        if (ret) {
                /* Couldn't get the RAM list, try again next time. */
                WRITE_ONCE(kcore_need_update, 1);
                list_splice_tail(&list, &garbage);
                goto out;
        }

        list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
                        list_move(&pos->list, &garbage);
        }
        list_splice_tail(&list, &kclist_head);

        proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
                                               &data_offset);

out:
        up_write(&kclist_lock);
        list_for_each_entry_safe(pos, tmp, &garbage, list) {
                list_del(&pos->list);
                kfree(pos);
        }
        return ret;
}

static void append_kcore_note(char *notes, size_t *i, const char *name,
                              unsigned int type, const void *desc,
                              size_t descsz)
{
        struct elf_note *note = (struct elf_note *)&notes[*i];

        note->n_namesz = strlen(name) + 1;
        note->n_descsz = descsz;
        note->n_type = type;
        *i += sizeof(*note);
        memcpy(&notes[*i], name, note->n_namesz);
        *i = ALIGN(*i + note->n_namesz, 4);
        memcpy(&notes[*i], desc, descsz);
        *i = ALIGN(*i + descsz, 4);
}
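
/*
 * Each record appended above follows the ELF note format:
 *
 *      struct elf_note { n_namesz; n_descsz; n_type; }
 *      name  (n_namesz bytes, including the NUL, padded to 4 bytes)
 *      desc  (n_descsz bytes, padded to 4 bytes)
 *
 * so *i always stays 4-byte aligned and points just past the last note.
 */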

static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
        char *buf = file->private_data;
        size_t phdrs_offset, notes_offset, data_offset;
        size_t phdrs_len, notes_len;
        struct kcore_list *m;
        size_t tsz;
        int nphdr;
        unsigned long start;
        size_t orig_buflen = buflen;
        int ret = 0;

        down_read(&kclist_lock);

        get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
        phdrs_offset = sizeof(struct elfhdr);
        notes_offset = phdrs_offset + phdrs_len;

        /* ELF file header. */
        if (buflen && *fpos < sizeof(struct elfhdr)) {
                struct elfhdr ehdr = {
                        .e_ident = {
                                [EI_MAG0] = ELFMAG0,
                                [EI_MAG1] = ELFMAG1,
                                [EI_MAG2] = ELFMAG2,
                                [EI_MAG3] = ELFMAG3,
                                [EI_CLASS] = ELF_CLASS,
                                [EI_DATA] = ELF_DATA,
                                [EI_VERSION] = EV_CURRENT,
                                [EI_OSABI] = ELF_OSABI,
                        },
                        .e_type = ET_CORE,
                        .e_machine = ELF_ARCH,
                        .e_version = EV_CURRENT,
                        .e_phoff = sizeof(struct elfhdr),
                        .e_flags = ELF_CORE_EFLAGS,
                        .e_ehsize = sizeof(struct elfhdr),
                        .e_phentsize = sizeof(struct elf_phdr),
                        .e_phnum = nphdr,
                };

                tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
                if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
                        ret = -EFAULT;
                        goto out;
                }

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /* ELF program headers. */
        if (buflen && *fpos < phdrs_offset + phdrs_len) {
                struct elf_phdr *phdrs, *phdr;

                phdrs = kzalloc(phdrs_len, GFP_KERNEL);
                if (!phdrs) {
                        ret = -ENOMEM;
                        goto out;
                }

                phdrs[0].p_type = PT_NOTE;
                phdrs[0].p_offset = notes_offset;
                phdrs[0].p_filesz = notes_len;

                phdr = &phdrs[1];
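                /*
                 * One PT_LOAD header per kclist entry.  p_offset is derived
                 * from the virtual address via kc_vaddr_to_offset() plus
                 * data_offset, matching where the data loop below places the
                 * contents; p_paddr is filled in only where a physical
                 * address can be computed (direct map, remap, kernel text),
                 * otherwise it is set to -1.
                 */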
                list_for_each_entry(m, &kclist_head, list) {
                        phdr->p_type = PT_LOAD;
                        phdr->p_flags = PF_R | PF_W | PF_X;
                        phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
                        if (m->type == KCORE_REMAP)
                                phdr->p_vaddr = (size_t)m->vaddr;
                        else
                                phdr->p_vaddr = (size_t)m->addr;
                        if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
                                phdr->p_paddr = __pa(m->addr);
                        else if (m->type == KCORE_TEXT)
                                phdr->p_paddr = __pa_symbol(m->addr);
                        else
                                phdr->p_paddr = (elf_addr_t)-1;
                        phdr->p_filesz = phdr->p_memsz = m->size;
                        phdr->p_align = PAGE_SIZE;
                        phdr++;
                }

                tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
                if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
                                 tsz)) {
                        kfree(phdrs);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(phdrs);

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /* ELF note segment. */
        if (buflen && *fpos < notes_offset + notes_len) {
                struct elf_prstatus prstatus = {};
                struct elf_prpsinfo prpsinfo = {
                        .pr_sname = 'R',
                        .pr_fname = "vmlinux",
                };
                char *notes;
                size_t i = 0;

                strlcpy(prpsinfo.pr_psargs, saved_command_line,
                        sizeof(prpsinfo.pr_psargs));

                notes = kzalloc(notes_len, GFP_KERNEL);
                if (!notes) {
                        ret = -ENOMEM;
                        goto out;
                }

                append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
                                  sizeof(prstatus));
                append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
                                  sizeof(prpsinfo));
                append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
                                  arch_task_struct_size);
                /*
                 * vmcoreinfo_size is mostly constant after init time, but it
                 * can be changed by crash_save_vmcoreinfo(). Racing here with a
                 * panic on another CPU before the machine goes down is insanely
                 * unlikely, but it's better to not leave potential buffer
                 * overflows lying around, regardless.
                 */
                append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
                                  vmcoreinfo_data,
                                  min(vmcoreinfo_size, notes_len - i));

                tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
                if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
                        kfree(notes);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(notes);

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /*
         * Check to see if our file offset matches with any of
         * the addresses in the elf_phdr on our list.
         */
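        /*
         * The rest of the file is the memory image itself, copied out at most
         * one page at a time through the bounce buffer allocated at open():
         * vmalloc ranges go through vread(), KCORE_USER ranges are copied
         * directly, and everything else uses probe_kernel_read(); offsets
         * with no backing entry, non-RAM pfns and unmapped addresses are
         * zero-filled.
         */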
        start = kc_offset_to_vaddr(*fpos - data_offset);
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        m = NULL;
        while (buflen) {
                /*
                 * If this is the first iteration or the address is not within
                 * the previous entry, search for a matching entry.
                 */
                if (!m || start < m->addr || start >= m->addr + m->size) {
                        list_for_each_entry(m, &kclist_head, list) {
                                if (start >= m->addr &&
                                    start < m->addr + m->size)
                                        break;
                        }
                }

                if (&m->list == &kclist_head) {
                        if (clear_user(buffer, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        m = NULL;       /* skip the list anchor */
                } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
                        if (clear_user(buffer, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                } else if (m->type == KCORE_VMALLOC) {
                        vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
                        if (copy_to_user(buffer, buf, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                } else if (m->type == KCORE_USER) {
                        /* User page is handled prior to normal kernel page: */
                        if (copy_to_user(buffer, (char *)start, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                } else {
                        if (kern_addr_valid(start)) {
                                /*
                                 * Using bounce buffer to bypass the
                                 * hardened user copy kernel text checks.
                                 */
                                if (probe_kernel_read(buf, (void *)start, tsz)) {
                                        if (clear_user(buffer, tsz)) {
                                                ret = -EFAULT;
                                                goto out;
                                        }
                                } else {
                                        if (copy_to_user(buffer, buf, tsz)) {
                                                ret = -EFAULT;
                                                goto out;
                                        }
                                }
                        } else {
                                if (clear_user(buffer, tsz)) {
                                        ret = -EFAULT;
                                        goto out;
                                }
                        }
                }
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                start += tsz;
                tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
        }

out:
        up_read(&kclist_lock);
        if (ret)
                return ret;
        return orig_buflen - buflen;
}
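
/*
 * open() requires CAP_SYS_RAWIO and allocates the one-page bounce buffer
 * used by read_kcore().  It also rebuilds the RAM list if memory was
 * hotplugged and keeps the inode size in sync with the current layout.
 */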
static int open_kcore(struct inode *inode, struct file *filp)
{
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!filp->private_data)
                return -ENOMEM;

        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
                inode_lock(inode);
                i_size_write(inode, proc_root_kcore->size);
                inode_unlock(inode);
        }
        return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static const struct file_operations proc_kcore_operations = {
        .read = read_kcore,
        .open = open_kcore,
        .release = release_kcore,
        .llseek = default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                kcore_need_update = 1;
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
        .notifier_call = kcore_callback,
        .priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If CONFIG_ARCH_PROC_KCORE_TEXT is defined, the kernel text is mapped
 * through a dedicated segment rather than the direct-map area, so a special
 * TEXT entry is created for it.
 */
static void __init proc_kcore_text_init(void)
{
        kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * The modules area (MODULES_VADDR..MODULES_END) does not overlap the vmalloc
 * area, so add it as its own entry.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
        if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
                kclist_add(&kcore_modules, (void *)MODULES_VADDR,
                           MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
        }
}
#else
static void __init add_modules_range(void)
{
}
#endif
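
/*
 * Create /proc/kcore (root-readable), register the static text, vmalloc and
 * modules entries, build the initial RAM list and hook into memory hotplug.
 * The resulting file is a live ELF core image of the running kernel; it is
 * typically inspected with something like "gdb vmlinux /proc/kcore".
 */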
static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
                                      &proc_kcore_operations);
        if (!proc_root_kcore) {
                pr_err("couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        add_modules_range();
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        register_hotmemory_notifier(&kcore_callback_nb);

        return 0;
}
fs_initcall(proc_kcore_init);