crash_core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * crash.c - kernel crash support code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/buildid.h>
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/kexec.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/crash_core.h>
#include <linux/reboot.h>
#include <linux/btf.h>
#include <linux/objtool.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/sha1.h>

#include "kallsyms_internal.h"
#include "kexec_internal.h"

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

#ifdef CONFIG_CRASH_DUMP

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
        struct page *vmcoreinfo_page;
        void *safecopy;

        if (!IS_ENABLED(CONFIG_CRASH_DUMP))
                return 0;
        if (image->type != KEXEC_TYPE_CRASH)
                return 0;

        /*
         * For kdump, allocate one vmcoreinfo safe copy from the
         * crash memory. Since we have arch_kexec_protect_crashkres()
         * after the kexec syscall, it is naturally protected from
         * write (and even read) access under the kernel direct
         * mapping. On the other hand, we still need to access it
         * when a crash happens, to generate the vmcoreinfo note,
         * hence we rely on vmap for this purpose.
         */
        vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
        if (!vmcoreinfo_page) {
                pr_warn("Could not allocate vmcoreinfo buffer\n");
                return -ENOMEM;
        }
        safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
        if (!safecopy) {
                pr_warn("Could not vmap vmcoreinfo buffer\n");
                return -ENOMEM;
        }

        image->vmcoreinfo_data_copy = safecopy;
        crash_update_vmcoreinfo_safecopy(safecopy);

        return 0;
}

int kexec_should_crash(struct task_struct *p)
{
        /*
         * If crash_kexec_post_notifiers is enabled, don't run
         * crash_kexec() here yet, which must be run after panic
         * notifiers in panic().
         */
        if (crash_kexec_post_notifiers)
                return 0;
        /*
         * There are 4 panic() calls in the make_task_dead() path, each of
         * which corresponds to one of these 4 conditions.
         */
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
                return 1;
        return 0;
}

int kexec_crash_loaded(void)
{
        return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * No panic_cpu check version of crash_kexec(). This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
        /* Take the kexec_lock here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient. But since I reuse the memory...
         */
        if (kexec_trylock()) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                kexec_unlock();
        }
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
        int old_cpu, this_cpu;

        /*
         * Only one CPU is allowed to execute the crash_kexec() code as with
         * panic(). Otherwise parallel calls of panic() and crash_kexec()
         * may stop each other. To exclude them, we use panic_cpu here too.
         */
        old_cpu = PANIC_CPU_INVALID;
        this_cpu = raw_smp_processor_id();

        if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu)) {
                /* This is the 1st CPU which comes here, so go ahead. */
                __crash_kexec(regs);

                /*
                 * Reset panic_cpu to allow another panic()/crash_kexec()
                 * call.
                 */
                atomic_set(&panic_cpu, PANIC_CPU_INVALID);
        }
}

static inline resource_size_t crash_resource_size(const struct resource *res)
{
        return !res->end ? 0 : resource_size(res);
}

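/*
 * Layout of the ELF core header built below (phdrs in this order):
 * the Elf64_Ehdr, one PT_NOTE phdr per possible CPU (the per-cpu
 * crash_notes), one PT_NOTE phdr for the vmcoreinfo note, an optional
 * PT_LOAD phdr for the kernel text mapping when @need_kernel_map is
 * set, and one PT_LOAD phdr per entry in @mem->ranges[].
 */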
int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
                                void **addr, unsigned long *sz)
{
        Elf64_Ehdr *ehdr;
        Elf64_Phdr *phdr;
        unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
        unsigned char *buf;
        unsigned int cpu, i;
        unsigned long long notes_addr;
        unsigned long mstart, mend;

        /* extra phdr for vmcoreinfo ELF note */
        nr_phdr = nr_cpus + 1;
        nr_phdr += mem->nr_ranges;

        /*
         * kexec-tools creates an extra PT_LOAD phdr for the kernel text
         * mapping area (for example, ffffffff80000000 - ffffffffa0000000 on
         * x86_64). I think this is required by tools like gdb. So the same
         * physical memory will be mapped in two ELF headers: one will
         * contain kernel text virtual addresses and the other will have
         * __va(physical) addresses.
         */
        nr_phdr++;
        elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
        elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

        buf = vzalloc(elf_sz);
        if (!buf)
                return -ENOMEM;

        ehdr = (Elf64_Ehdr *)buf;
        phdr = (Elf64_Phdr *)(ehdr + 1);
        memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
        ehdr->e_ident[EI_CLASS] = ELFCLASS64;
        ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
        ehdr->e_ident[EI_VERSION] = EV_CURRENT;
        ehdr->e_ident[EI_OSABI] = ELF_OSABI;
        memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
        ehdr->e_type = ET_CORE;
        ehdr->e_machine = ELF_ARCH;
        ehdr->e_version = EV_CURRENT;
        ehdr->e_phoff = sizeof(Elf64_Ehdr);
        ehdr->e_ehsize = sizeof(Elf64_Ehdr);
        ehdr->e_phentsize = sizeof(Elf64_Phdr);

        /* Prepare one phdr of type PT_NOTE for each possible CPU */
        for_each_possible_cpu(cpu) {
                phdr->p_type = PT_NOTE;
                notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
                phdr->p_offset = phdr->p_paddr = notes_addr;
                phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
                (ehdr->e_phnum)++;
                phdr++;
        }

        /* Prepare one PT_NOTE header for vmcoreinfo */
        phdr->p_type = PT_NOTE;
        phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
        phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
        (ehdr->e_phnum)++;
        phdr++;

        /* Prepare PT_LOAD type program header for kernel text region */
        if (need_kernel_map) {
                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R|PF_W|PF_X;
                phdr->p_vaddr = (unsigned long) _text;
                phdr->p_filesz = phdr->p_memsz = _end - _text;
                phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
                ehdr->e_phnum++;
                phdr++;
        }

        /* Go through all the ranges in mem->ranges[] and prepare phdr */
        for (i = 0; i < mem->nr_ranges; i++) {
                mstart = mem->ranges[i].start;
                mend = mem->ranges[i].end;

                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R|PF_W|PF_X;
                phdr->p_offset = mstart;

                phdr->p_paddr = mstart;
                phdr->p_vaddr = (unsigned long) __va(mstart);
                phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
                phdr->p_align = 0;
                ehdr->e_phnum++;
#ifdef CONFIG_KEXEC_FILE
                kexec_dprintk("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
                              phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
                              ehdr->e_phnum, phdr->p_offset);
#endif
                phdr++;
        }

        *addr = buf;
        *sz = elf_sz;
        return 0;
}

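/*
 * Remove the region [@mstart, @mend] (inclusive) from every entry in
 * @mem->ranges[]. Depending on the overlap, an entry is dropped,
 * truncated at either end, or split in two; a split consumes one extra
 * slot, hence the max_nr_ranges check. For example, excluding
 * [0x2000, 0x2fff] from the single range [0x1000, 0x3fff] leaves the
 * two ranges [0x1000, 0x1fff] and [0x3000, 0x3fff].
 */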
int crash_exclude_mem_range(struct crash_mem *mem,
                            unsigned long long mstart, unsigned long long mend)
{
        int i;
        unsigned long long start, end, p_start, p_end;

        for (i = 0; i < mem->nr_ranges; i++) {
                start = mem->ranges[i].start;
                end = mem->ranges[i].end;
                p_start = mstart;
                p_end = mend;

                if (p_start > end)
                        continue;

                /*
                 * Because the memory ranges in mem->ranges are stored in
                 * ascending order, when we detect `p_end < start`, we can
                 * immediately exit the for loop, as the subsequent memory
                 * ranges will definitely be outside the range we are looking
                 * for.
                 */
                if (p_end < start)
                        break;

                /* Truncate any area outside of range */
                if (p_start < start)
                        p_start = start;
                if (p_end > end)
                        p_end = end;

                /* Found completely overlapping range */
                if (p_start == start && p_end == end) {
                        memmove(&mem->ranges[i], &mem->ranges[i + 1],
                                (mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));
                        i--;
                        mem->nr_ranges--;
                } else if (p_start > start && p_end < end) {
                        /* Split original range */
                        if (mem->nr_ranges >= mem->max_nr_ranges)
                                return -ENOMEM;

                        memmove(&mem->ranges[i + 2], &mem->ranges[i + 1],
                                (mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));

                        mem->ranges[i].end = p_start - 1;
                        mem->ranges[i + 1].start = p_end + 1;
                        mem->ranges[i + 1].end = end;

                        i++;
                        mem->nr_ranges++;
                } else if (p_start != start)
                        mem->ranges[i].end = p_start - 1;
                else
                        mem->ranges[i].start = p_end + 1;
        }

        return 0;
}

ssize_t crash_get_memory_size(void)
{
        ssize_t size = 0;

        if (!kexec_trylock())
                return -EBUSY;

        size += crash_resource_size(&crashk_res);
        size += crash_resource_size(&crashk_low_res);

        kexec_unlock();
        return size;
}

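/*
 * Shrink the reservation described by @old_res to @new_size bytes and
 * return the freed tail to the "System RAM" resource tree; a @new_size
 * of zero releases the reservation entirely.
 */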
static int __crash_shrink_memory(struct resource *old_res,
                                 unsigned long new_size)
{
        struct resource *ram_res;

        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
        if (!ram_res)
                return -ENOMEM;

        ram_res->start = old_res->start + new_size;
        ram_res->end   = old_res->end;
        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
        ram_res->name  = "System RAM";

        if (!new_size) {
                release_resource(old_res);
                old_res->start = 0;
                old_res->end   = 0;
        } else {
                crashk_res.end = ram_res->start - 1;
        }

        crash_free_reserved_phys_range(ram_res->start, ram_res->end);
        insert_resource(&iomem_resource, ram_res);

        return 0;
}

int crash_shrink_memory(unsigned long new_size)
{
        int ret = 0;
        unsigned long old_size, low_size;

        if (!kexec_trylock())
                return -EBUSY;

        if (kexec_crash_image) {
                ret = -ENOENT;
                goto unlock;
        }

        low_size = crash_resource_size(&crashk_low_res);
        old_size = crash_resource_size(&crashk_res) + low_size;
        new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
        if (new_size >= old_size) {
                ret = (new_size == old_size) ? 0 : -EINVAL;
                goto unlock;
        }

        /*
         * (low_size > new_size) implies that low_size is greater than zero.
         * This also means that if low_size is zero, the else branch is taken.
         *
         * If low_size is greater than 0, (low_size > new_size) indicates that
         * crashk_low_res also needs to be shrunken. Otherwise, only crashk_res
         * needs to be shrunken.
         */
        if (low_size > new_size) {
                ret = __crash_shrink_memory(&crashk_res, 0);
                if (ret)
                        goto unlock;

                ret = __crash_shrink_memory(&crashk_low_res, new_size);
        } else {
                ret = __crash_shrink_memory(&crashk_res, new_size - low_size);
        }

        /* Swap crashk_res and crashk_low_res if needed */
        if (!crashk_res.end && crashk_low_res.end) {
                crashk_res.start = crashk_low_res.start;
                crashk_res.end   = crashk_low_res.end;
                release_resource(&crashk_low_res);
                crashk_low_res.start = 0;
                crashk_low_res.end   = 0;
                insert_resource(&iomem_resource, &crashk_res);
        }

unlock:
        kexec_unlock();
        return ret;
}

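/*
 * Save the register state of @cpu into its per-cpu crash_notes buffer
 * as an NT_PRSTATUS ELF note, so the dump capture environment can
 * recover each CPU's registers from the crash notes.
 */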
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;
        u32 *buf;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))
                return;

        /* Using ELF notes here is opportunistic.
         * I need a well defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away. ELF notes happen to provide
         * all of that, so there is no need to invent something new.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        if (!buf)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.common.pr_pid = current->pid;
        elf_core_copy_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
        /* Allocate memory for saving cpu registers. */
        size_t size, align;

        /*
         * crash_notes could be allocated across 2 vmalloc pages when percpu
         * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous
         * vmalloc pages are also on 2 contiguous physical pages. In this
         * case the 2nd part of crash_notes in the 2nd page could be lost,
         * since only the starting address and size of crash_notes are
         * exported through sysfs. Here we round up the size of crash_notes
         * to the nearest power of two and pass it to __alloc_percpu as the
         * align value. This makes sure crash_notes is allocated inside one
         * physical page.
         */
        size = sizeof(note_buf_t);
        align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

        /*
         * Break the build if size is bigger than PAGE_SIZE, since crash_notes
         * would then definitely span 2 pages.
         */
        BUILD_BUG_ON(size > PAGE_SIZE);

        crash_notes = __alloc_percpu(size, align);
        if (!crash_notes) {
                pr_warn("Memory allocation for saving cpu register states failed\n");
                return -ENOMEM;
        }
        return 0;
}
subsys_initcall(crash_notes_memory_init);

#endif /* CONFIG_CRASH_DUMP */

#ifdef CONFIG_CRASH_HOTPLUG
#undef pr_fmt
#define pr_fmt(fmt) "crash hp: " fmt

/*
 * Unlike kexec/kdump loading/unloading/jumping/shrinking, which rarely
 * happens, many crash hotplug events may be notified within one short
 * period, e.g. when one memory board is hot-added and its memory regions
 * come online. So the mutex __crash_hotplug_lock is used to serialize the
 * crash hotplug handling specifically.
 */
static DEFINE_MUTEX(__crash_hotplug_lock);
#define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
#define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)

/*
 * This routine is utilized when the crash_hotplug sysfs node is read.
 * It reflects the kernel's ability/permission to update the kdump
 * image directly.
 */
int crash_check_hotplug_support(void)
{
        int rc = 0;

        crash_hotplug_lock();
        /* Obtain lock while reading crash information */
        if (!kexec_trylock()) {
                pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
                crash_hotplug_unlock();
                return 0;
        }
        if (kexec_crash_image)
                rc = kexec_crash_image->hotplug_support;
        /* Release lock now that update complete */
        kexec_unlock();
        crash_hotplug_unlock();

        return rc;
}

/*
 * To accurately reflect hot un/plug changes of CPU and Memory resources
 * (including onlining and offlining of those resources), the relevant
 * kexec segments must be updated with the latest CPU and Memory resources.
 *
 * Architectures must ensure two things for all segments that need
 * updating during hotplug events:
 *
 * 1. Segments must be large enough to accommodate a growing number of
 *    resources.
 * 2. Exclude the segments from SHA verification.
 *
 * For example, on most architectures, the elfcorehdr (which is passed
 * to the crash kernel via the elfcorehdr= parameter) must include the
 * new list of CPUs and memory. To make changes to the elfcorehdr, it
 * should be large enough to permit a growing number of CPU and Memory
 * resources. One can estimate the elfcorehdr memory size based on
 * NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES. The elfcorehdr is
 * excluded from SHA verification by default if the architecture
 * supports crash hotplug.
 */
static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu, void *arg)
{
        struct kimage *image;

        crash_hotplug_lock();
        /* Obtain lock while changing crash information */
        if (!kexec_trylock()) {
                pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
                crash_hotplug_unlock();
                return;
        }

        /* Nothing to do if kdump is not loaded */
        if (!kexec_crash_image)
                goto out;

        image = kexec_crash_image;

        /* Check that updating the kexec segments is permitted */
        if (!image->hotplug_support)
                goto out;

        if (hp_action == KEXEC_CRASH_HP_ADD_CPU ||
            hp_action == KEXEC_CRASH_HP_REMOVE_CPU)
                pr_debug("hp_action %u, cpu %u\n", hp_action, cpu);
        else
                pr_debug("hp_action %u\n", hp_action);

        /*
         * The elfcorehdr_index is set to -1 when the struct kimage
         * is allocated. Find the segment containing the elfcorehdr,
         * if not already found.
         */
        if (image->elfcorehdr_index < 0) {
                unsigned long mem;
                unsigned char *ptr;
                unsigned int n;

                for (n = 0; n < image->nr_segments; n++) {
                        mem = image->segment[n].mem;
                        ptr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
                        if (ptr) {
                                /* The segment containing elfcorehdr */
                                if (memcmp(ptr, ELFMAG, SELFMAG) == 0)
                                        image->elfcorehdr_index = (int)n;
                                kunmap_local(ptr);
                        }
                }
        }

        if (image->elfcorehdr_index < 0) {
                pr_err("unable to locate elfcorehdr segment");
                goto out;
        }

        /* Needed in order for the segments to be updated */
        arch_kexec_unprotect_crashkres();

        /* Differentiate between normal load and hotplug update */
        image->hp_action = hp_action;

        /* Now invoke arch-specific update handler */
        arch_crash_handle_hotplug_event(image, arg);

        /* No longer handling a hotplug event */
        image->hp_action = KEXEC_CRASH_HP_NONE;
        image->elfcorehdr_updated = true;

        /* Change back to read-only */
        arch_kexec_protect_crashkres();

        /* Errors in the callback are not a reason to roll back state */
out:
        /* Release lock now that update complete */
        kexec_unlock();
        crash_hotplug_unlock();
}

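/*
 * The notifiers below feed memory and CPU hot(un)plug events into
 * crash_handle_hotplug_event() so the kdump image stays up to date.
 */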
static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *arg)
{
        switch (val) {
        case MEM_ONLINE:
                crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_MEMORY,
                                           KEXEC_CRASH_HP_INVALID_CPU, arg);
                break;

        case MEM_OFFLINE:
                crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_MEMORY,
                                           KEXEC_CRASH_HP_INVALID_CPU, arg);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block crash_memhp_nb = {
        .notifier_call = crash_memhp_notifier,
        .priority = 0
};

static int crash_cpuhp_online(unsigned int cpu)
{
        crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_CPU, cpu, NULL);
        return 0;
}

static int crash_cpuhp_offline(unsigned int cpu)
{
        crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_CPU, cpu, NULL);
        return 0;
}

static int __init crash_hotplug_init(void)
{
        int result = 0;

        if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
                register_memory_notifier(&crash_memhp_nb);

        if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
                result = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
                                                   "crash/cpuhp", crash_cpuhp_online,
                                                   crash_cpuhp_offline);
        }

        return result;
}
subsys_initcall(crash_hotplug_init);

#endif