reboot.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/kernel/reboot.c
  4. *
  5. * Copyright (C) 2013 Linus Torvalds
  6. */
  7. #define pr_fmt(fmt) "reboot: " fmt
  8. #include <linux/atomic.h>
  9. #include <linux/ctype.h>
  10. #include <linux/export.h>
  11. #include <linux/kexec.h>
  12. #include <linux/kmod.h>
  13. #include <linux/kmsg_dump.h>
  14. #include <linux/reboot.h>
  15. #include <linux/suspend.h>
  16. #include <linux/syscalls.h>
  17. #include <linux/syscore_ops.h>
  18. #include <linux/uaccess.h>
  19. /*
  20. * this indicates whether you can reboot with ctrl-alt-del: the default is yes
  21. */
  22. static int C_A_D = 1;
  23. struct pid *cad_pid;
  24. EXPORT_SYMBOL(cad_pid);
  25. #if defined(CONFIG_ARM)
  26. #define DEFAULT_REBOOT_MODE = REBOOT_HARD
  27. #else
  28. #define DEFAULT_REBOOT_MODE
  29. #endif
  30. enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
  31. EXPORT_SYMBOL_GPL(reboot_mode);
  32. enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
  33. /*
  34. * This variable is used privately to keep track of whether or not
  35. * reboot_type is still set to its default value (i.e., reboot= hasn't
  36. * been set on the command line). This is needed so that we can
  37. * suppress DMI scanning for reboot quirks. Without it, it's
  38. * impossible to override a faulty reboot quirk without recompiling.
  39. */
  40. int reboot_default = 1;
  41. int reboot_cpu;
  42. enum reboot_type reboot_type = BOOT_ACPI;
  43. int reboot_force;
/*
 * Internal bookkeeping for one registered sys-off handler.  Instances are
 * handed out opaquely by register_sys_off_handler() and released by
 * unregister_sys_off_handler().
 */
struct sys_off_handler {
	struct notifier_block nb;	/* links this handler into a notifier chain */
	int (*sys_off_cb)(struct sys_off_data *data);	/* user-supplied callback */
	void *cb_data;			/* opaque argument forwarded to the callback */
	enum sys_off_mode mode;		/* which sys-off step this handler serves */
	bool blocking;			/* true: blocking notifier chain, false: atomic */
	void *list;			/* notifier head the nb is registered on */
	struct device *dev;		/* owning device for devm_* registrations, else NULL */
};
/*
 * This variable is used to indicate if a halt was initiated instead of a
 * reboot when the reboot call was invoked with LINUX_REBOOT_CMD_POWER_OFF, but
 * the system cannot be powered off. This allows kernel_halt() to notify users
 * of that.
 */
static bool poweroff_fallback_to_halt;
  60. /*
  61. * Temporary stub that prevents linkage failure while we're in process
  62. * of removing all uses of legacy pm_power_off() around the kernel.
  63. */
  64. void __weak (*pm_power_off)(void);
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	/* Flush pending kernel messages before we lose the machine. */
	kmsg_dump(KMSG_DUMP_EMERG);
	system_state = SYSTEM_RESTART;
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
/**
 * kernel_restart_prepare - prepare the system for an orderly restart
 * @cmd: restart command string passed to the reboot notifiers, or NULL
 *
 * Runs the reboot notifier chain, marks the system as restarting,
 * disables usermode helpers and shuts down devices.  The order of
 * these steps is deliberate; callers must not reorder them.
 */
void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
}
/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
/*
 * devres release callback: unregisters the notifier block stashed in the
 * devres data when the owning device goes away.  Unregistration should
 * never fail here, hence the WARN_ON.
 */
static void devm_unregister_reboot_notifier(struct device *dev, void *res)
{
	WARN_ON(unregister_reboot_notifier(*(struct notifier_block **)res));
}
  120. int devm_register_reboot_notifier(struct device *dev, struct notifier_block *nb)
  121. {
  122. struct notifier_block **rcnb;
  123. int ret;
  124. rcnb = devres_alloc(devm_unregister_reboot_notifier,
  125. sizeof(*rcnb), GFP_KERNEL);
  126. if (!rcnb)
  127. return -ENOMEM;
  128. ret = register_reboot_notifier(nb);
  129. if (!ret) {
  130. *rcnb = nb;
  131. devres_add(dev, rcnb);
  132. } else {
  133. devres_free(rcnb);
  134. }
  135. return ret;
  136. }
  137. EXPORT_SYMBOL(devm_register_reboot_notifier);
/*
 * Notifier list for kernel code which wants to be called
 * to restart the system.
 */
static ATOMIC_NOTIFIER_HEAD(restart_handler_list);

/**
 * register_restart_handler - Register function to be called to reset
 *			      the system
 * @nb: Info about handler function to be called
 * @nb->priority:	Handler priority. Handlers should follow the
 *			following guidelines for setting priorities.
 *			0:	Restart handler of last resort,
 *				with limited restart capabilities
 *			128:	Default restart handler; use if no other
 *				restart handler is expected to be available,
 *				and/or if restart functionality is
 *				sufficient to restart the entire system
 *			255:	Highest priority restart handler, will
 *				preempt all other restart handlers
 *
 * Registers a function with code to be called to restart the
 * system.
 *
 * Registered functions will be called from machine_restart as last
 * step of the restart sequence (if the architecture specific
 * machine_restart function calls do_kernel_restart - see below
 * for details).
 * Registered functions are expected to restart the system immediately.
 * If more than one function is registered, the restart handler priority
 * selects which function will be called first.
 *
 * Restart handlers are expected to be registered from non-architecture
 * code, typically from drivers. A typical use case would be a system
 * where restart functionality is provided through a watchdog. Multiple
 * restart handlers may exist; for example, one restart handler might
 * restart the entire system, while another only restarts the CPU.
 * In such cases, the restart handler which only restarts part of the
 * hardware is expected to register with low priority to ensure that
 * it only runs if no other means to restart the system is available.
 *
 * Currently always returns zero, as atomic_notifier_chain_register()
 * always returns zero.
 */
int register_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&restart_handler_list, nb);
}
EXPORT_SYMBOL(register_restart_handler);
/**
 * unregister_restart_handler - Unregister previously registered
 *				restart handler
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered restart handler function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&restart_handler_list, nb);
}
EXPORT_SYMBOL(unregister_restart_handler);
/**
 * do_kernel_restart - Execute kernel restart handler call chain
 * @cmd: restart command string, passed through to the handlers
 *
 * Calls functions registered with register_restart_handler.
 *
 * Expected to be called from machine_restart as last step of the restart
 * sequence.
 *
 * Restarts the system immediately if a restart handler function has been
 * registered. Otherwise does nothing.
 */
void do_kernel_restart(char *cmd)
{
	/* reboot_mode is passed as the notifier's "action" value */
	atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd);
}
/*
 * migrate_to_reboot_cpu - pin the current task to the CPU that will
 * perform the reboot (reboot_cpu, or the first online CPU if that one
 * is offline).  CPU hotplug is disabled first so the chosen CPU cannot
 * vanish underneath us.
 */
void migrate_to_reboot_cpu(void)
{
	/* The boot cpu is always logical cpu 0 */
	int cpu = reboot_cpu;

	cpu_hotplug_disable();

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(cpu))
		cpu = cpumask_first(cpu_online_mask);

	/* Prevent races with other tasks migrating this task */
	current->flags |= PF_NO_SETAFFINITY;

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
}
/*
 * Notifier list for kernel code which wants to be called
 * to prepare system for restart.
 */
static BLOCKING_NOTIFIER_HEAD(restart_prep_handler_list);

/* Run all SYS_OFF_MODE_RESTART_PREPARE handlers (may sleep). */
static void do_kernel_restart_prepare(void)
{
	blocking_notifier_call_chain(&restart_prep_handler_list, 0, NULL);
}
/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 *
 * The sequence below (notifiers, prepare handlers, CPU migration,
 * syscore shutdown, machine_restart) is order-sensitive; do not reorder.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	do_kernel_restart_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	if (!cmd)
		pr_emerg("Restarting system\n");
	else
		pr_emerg("Restarting system with command '%s'\n", cmd);
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/*
 * Common preparation for halt and power-off: run the reboot notifiers
 * with the matching event, update system_state, disable usermode
 * helpers and shut down devices.
 */
static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();
	device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	/* Tell the user if this halt is a fallback from a failed power-off. */
	if (poweroff_fallback_to_halt)
		pr_emerg("Power off not available: System halted instead\n");
	else
		pr_emerg("System halted\n");
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);
/*
 * Notifier list for kernel code which wants to be called
 * to prepare system for power off (may sleep).
 */
static BLOCKING_NOTIFIER_HEAD(power_off_prep_handler_list);

/*
 * Notifier list for kernel code which wants to be called
 * to power off system (atomic context).
 */
static ATOMIC_NOTIFIER_HEAD(power_off_handler_list);
  295. static int sys_off_notify(struct notifier_block *nb,
  296. unsigned long mode, void *cmd)
  297. {
  298. struct sys_off_handler *handler;
  299. struct sys_off_data data = {};
  300. handler = container_of(nb, struct sys_off_handler, nb);
  301. data.cb_data = handler->cb_data;
  302. data.mode = mode;
  303. data.cmd = cmd;
  304. data.dev = handler->dev;
  305. return handler->sys_off_cb(&data);
  306. }
  307. static struct sys_off_handler platform_sys_off_handler;
  308. static struct sys_off_handler *alloc_sys_off_handler(int priority)
  309. {
  310. struct sys_off_handler *handler;
  311. gfp_t flags;
  312. /*
  313. * Platforms like m68k can't allocate sys_off handler dynamically
  314. * at the early boot time because memory allocator isn't available yet.
  315. */
  316. if (priority == SYS_OFF_PRIO_PLATFORM) {
  317. handler = &platform_sys_off_handler;
  318. if (handler->cb_data)
  319. return ERR_PTR(-EBUSY);
  320. } else {
  321. if (system_state > SYSTEM_RUNNING)
  322. flags = GFP_ATOMIC;
  323. else
  324. flags = GFP_KERNEL;
  325. handler = kzalloc(sizeof(*handler), flags);
  326. if (!handler)
  327. return ERR_PTR(-ENOMEM);
  328. }
  329. return handler;
  330. }
  331. static void free_sys_off_handler(struct sys_off_handler *handler)
  332. {
  333. if (handler == &platform_sys_off_handler)
  334. memset(handler, 0, sizeof(*handler));
  335. else
  336. kfree(handler);
  337. }
/**
 * register_sys_off_handler - Register sys-off handler
 * @mode: Sys-off mode
 * @priority: Handler priority
 * @callback: Callback function
 * @cb_data: Callback argument
 *
 * Registers system power-off or restart handler that will be invoked
 * at the step corresponding to the given sys-off mode. Handler's callback
 * should return NOTIFY_DONE to permit execution of the next handler in
 * the call chain or NOTIFY_STOP to break the chain (in error case for
 * example).
 *
 * Multiple handlers can be registered at the default priority level.
 *
 * Only one handler can be registered at the non-default priority level,
 * otherwise ERR_PTR(-EBUSY) is returned.
 *
 * Returns a new instance of struct sys_off_handler on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct sys_off_handler *
register_sys_off_handler(enum sys_off_mode mode,
			 int priority,
			 int (*callback)(struct sys_off_data *data),
			 void *cb_data)
{
	struct sys_off_handler *handler;
	int err;

	handler = alloc_sys_off_handler(priority);
	if (IS_ERR(handler))
		return handler;

	/*
	 * Map the sys-off mode onto the corresponding notifier chain.
	 * The *_PREPARE chains are blocking (may sleep); the final
	 * power-off/restart chains are atomic.
	 */
	switch (mode) {
	case SYS_OFF_MODE_POWER_OFF_PREPARE:
		handler->list = &power_off_prep_handler_list;
		handler->blocking = true;
		break;

	case SYS_OFF_MODE_POWER_OFF:
		handler->list = &power_off_handler_list;
		break;

	case SYS_OFF_MODE_RESTART_PREPARE:
		handler->list = &restart_prep_handler_list;
		handler->blocking = true;
		break;

	case SYS_OFF_MODE_RESTART:
		handler->list = &restart_handler_list;
		break;

	default:
		free_sys_off_handler(handler);
		return ERR_PTR(-EINVAL);
	}

	handler->nb.notifier_call = sys_off_notify;
	handler->nb.priority = priority;
	handler->sys_off_cb = callback;
	handler->cb_data = cb_data;
	handler->mode = mode;

	/* Non-default priorities must be unique on the chain. */
	if (handler->blocking) {
		if (priority == SYS_OFF_PRIO_DEFAULT)
			err = blocking_notifier_chain_register(handler->list,
							       &handler->nb);
		else
			err = blocking_notifier_chain_register_unique_prio(handler->list,
									   &handler->nb);
	} else {
		if (priority == SYS_OFF_PRIO_DEFAULT)
			err = atomic_notifier_chain_register(handler->list,
							     &handler->nb);
		else
			err = atomic_notifier_chain_register_unique_prio(handler->list,
									 &handler->nb);
	}

	if (err) {
		free_sys_off_handler(handler);
		return ERR_PTR(err);
	}

	return handler;
}
EXPORT_SYMBOL_GPL(register_sys_off_handler);
/**
 * unregister_sys_off_handler - Unregister sys-off handler
 * @handler: Sys-off handler
 *
 * Unregisters given sys-off handler.  NULL and ERR_PTR values are
 * tolerated, so callers may pass a failed registration result directly.
 */
void unregister_sys_off_handler(struct sys_off_handler *handler)
{
	int err;

	if (IS_ERR_OR_NULL(handler))
		return;

	if (handler->blocking)
		err = blocking_notifier_chain_unregister(handler->list,
							 &handler->nb);
	else
		err = atomic_notifier_chain_unregister(handler->list,
						       &handler->nb);

	/* sanity check, shall never happen */
	WARN_ON(err);

	free_sys_off_handler(handler);
}
EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
  438. static void devm_unregister_sys_off_handler(void *data)
  439. {
  440. struct sys_off_handler *handler = data;
  441. unregister_sys_off_handler(handler);
  442. }
/**
 * devm_register_sys_off_handler - Register sys-off handler
 * @dev: Device that registers handler
 * @mode: Sys-off mode
 * @priority: Handler priority
 * @callback: Callback function
 * @cb_data: Callback argument
 *
 * Registers resource-managed sys-off handler; it is unregistered
 * automatically when @dev is unbound.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_sys_off_handler(struct device *dev,
				  enum sys_off_mode mode,
				  int priority,
				  int (*callback)(struct sys_off_data *data),
				  void *cb_data)
{
	struct sys_off_handler *handler;

	handler = register_sys_off_handler(mode, priority, callback, cb_data);
	if (IS_ERR(handler))
		return PTR_ERR(handler);
	/* record the owner so the callback can reach the device */
	handler->dev = dev;

	return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler,
					handler);
}
EXPORT_SYMBOL_GPL(devm_register_sys_off_handler);
/**
 * devm_register_power_off_handler - Register power-off handler
 * @dev: Device that registers callback
 * @callback: Callback function
 * @cb_data: Callback's argument
 *
 * Registers resource-managed sys-off handler with a default priority
 * and using power-off mode.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_power_off_handler(struct device *dev,
				    int (*callback)(struct sys_off_data *data),
				    void *cb_data)
{
	return devm_register_sys_off_handler(dev,
					     SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_DEFAULT,
					     callback, cb_data);
}
EXPORT_SYMBOL_GPL(devm_register_power_off_handler);
/**
 * devm_register_restart_handler - Register restart handler
 * @dev: Device that registers callback
 * @callback: Callback function
 * @cb_data: Callback's argument
 *
 * Registers resource-managed sys-off handler with a default priority
 * and using restart mode.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_restart_handler(struct device *dev,
				  int (*callback)(struct sys_off_data *data),
				  void *cb_data)
{
	return devm_register_sys_off_handler(dev,
					     SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_DEFAULT,
					     callback, cb_data);
}
EXPORT_SYMBOL_GPL(devm_register_restart_handler);
  512. static struct sys_off_handler *platform_power_off_handler;
  513. static int platform_power_off_notify(struct sys_off_data *data)
  514. {
  515. void (*platform_power_power_off_cb)(void) = data->cb_data;
  516. platform_power_power_off_cb();
  517. return NOTIFY_DONE;
  518. }
/**
 * register_platform_power_off - Register platform-level power-off callback
 * @power_off: Power-off callback
 *
 * Registers power-off callback that will be called as last step
 * of the power-off sequence. This callback is expected to be invoked
 * for the last resort. Only one platform power-off callback is allowed
 * to be registered at a time.
 *
 * Returns zero on success, or error code on failure.
 */
int register_platform_power_off(void (*power_off)(void))
{
	struct sys_off_handler *handler;

	handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					   SYS_OFF_PRIO_PLATFORM,
					   platform_power_off_notify,
					   power_off);
	if (IS_ERR(handler))
		return PTR_ERR(handler);

	/* remember the handler so unregister_platform_power_off() can match it */
	platform_power_off_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(register_platform_power_off);
  543. /**
  544. * unregister_platform_power_off - Unregister platform-level power-off callback
  545. * @power_off: Power-off callback
  546. *
  547. * Unregisters previously registered platform power-off callback.
  548. */
  549. void unregister_platform_power_off(void (*power_off)(void))
  550. {
  551. if (platform_power_off_handler &&
  552. platform_power_off_handler->cb_data == power_off) {
  553. unregister_sys_off_handler(platform_power_off_handler);
  554. platform_power_off_handler = NULL;
  555. }
  556. }
  557. EXPORT_SYMBOL_GPL(unregister_platform_power_off);
/*
 * Sys-off shim around the legacy global pm_power_off() hook; registered
 * on demand by do_kernel_power_off() while legacy users still exist.
 */
static int legacy_pm_power_off(struct sys_off_data *data)
{
	if (pm_power_off)
		pm_power_off();

	return NOTIFY_DONE;
}
/* Run all SYS_OFF_MODE_POWER_OFF_PREPARE handlers (may sleep). */
static void do_kernel_power_off_prepare(void)
{
	blocking_notifier_call_chain(&power_off_prep_handler_list, 0, NULL);
}
/**
 * do_kernel_power_off - Execute kernel power-off handler call chain
 *
 * Expected to be called as last step of the power-off sequence.
 *
 * Powers off the system immediately if a power-off handler function has
 * been registered. Otherwise does nothing.
 */
void do_kernel_power_off(void)
{
	struct sys_off_handler *sys_off = NULL;

	/*
	 * Register sys-off handlers for legacy PM callback. This allows
	 * legacy PM callbacks temporary co-exist with the new sys-off API.
	 *
	 * TODO: Remove legacy handlers once all legacy PM users will be
	 *       switched to the sys-off based APIs.
	 */
	if (pm_power_off)
		sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
						   SYS_OFF_PRIO_DEFAULT,
						   legacy_pm_power_off, NULL);

	atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);

	/* tolerates NULL/ERR_PTR, so unconditional cleanup is safe */
	unregister_sys_off_handler(sys_off);
}
  593. /**
  594. * kernel_can_power_off - check whether system can be powered off
  595. *
  596. * Returns true if power-off handler is registered and system can be
  597. * powered off, false otherwise.
  598. */
  599. bool kernel_can_power_off(void)
  600. {
  601. return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) ||
  602. pm_power_off;
  603. }
  604. EXPORT_SYMBOL_GPL(kernel_can_power_off);
/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 * The step order below is deliberate; do not reorder.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	do_kernel_power_off_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	pr_emerg("Power down\n");
	/* give the console up to 1s to drain the message */
	pr_flush(1000, true);
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
DEFINE_MUTEX(system_transition_mutex);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
			(magic2 != LINUX_REBOOT_MAGIC2 &&
			magic2 != LINUX_REBOOT_MAGIC2A &&
			magic2 != LINUX_REBOOT_MAGIC2B &&
			magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(pid_ns, cmd);
	if (ret)
		return ret;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off()) {
		/* remembered so kernel_halt() can report the fallback */
		poweroff_fallback_to_halt = true;
		cmd = LINUX_REBOOT_CMD_HALT;
	}

	mutex_lock(&system_transition_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);	/* never returns, hence no break */

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		ret = strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1);
		if (ret < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';
		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC_CORE
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&system_transition_mutex);
	return ret;
}
/* Work item body: performs the ctrl-alt-del restart in process context. */
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}
/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		/* defer the restart to process context via the workqueue */
		schedule_work(&cad_work);
	else
		/* C-A-D disabled: just signal the registered cad_pid instead */
		kill_cad_pid(SIGINT, 1);
}
  723. #define POWEROFF_CMD_PATH_LEN 256
  724. static char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
  725. static const char reboot_cmd[] = "/sbin/reboot";
  726. static int run_cmd(const char *cmd)
  727. {
  728. char **argv;
  729. static char *envp[] = {
  730. "HOME=/",
  731. "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
  732. NULL
  733. };
  734. int ret;
  735. argv = argv_split(GFP_KERNEL, cmd, NULL);
  736. if (argv) {
  737. ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
  738. argv_free(argv);
  739. } else {
  740. ret = -ENOMEM;
  741. }
  742. return ret;
  743. }
/*
 * Try the userspace /sbin/reboot; if launching it fails, sync what we
 * can and force an in-kernel restart.
 */
static int __orderly_reboot(void)
{
	int ret;

	ret = run_cmd(reboot_cmd);

	if (ret) {
		pr_warn("Failed to start orderly reboot: forcing the issue\n");
		emergency_sync();
		kernel_restart(NULL);
	}

	return ret;
}
/*
 * Try the userspace poweroff command; if it cannot be started and
 * @force is set, sync and power off from the kernel directly.
 */
static int __orderly_poweroff(bool force)
{
	int ret;

	ret = run_cmd(poweroff_cmd);

	if (ret && force) {
		pr_warn("Failed to start orderly shutdown: forcing the issue\n");

		/*
		 * I guess this should try to kick off some daemon to sync and
		 * poweroff asap.  Or not even bother syncing if we're doing an
		 * emergency shutdown?
		 */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
/* Sticky force flag: once set true it is never cleared (see orderly_poweroff). */
static bool poweroff_force;

/* Workqueue body for orderly_poweroff(). */
static void poweroff_work_func(struct work_struct *work)
{
	__orderly_poweroff(poweroff_force);
}

static DECLARE_WORK(poweroff_work, poweroff_work_func);
/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
void orderly_poweroff(bool force)
{
	if (force) /* do not override the pending "true" */
		poweroff_force = true;
	schedule_work(&poweroff_work);
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
/* Workqueue callback: perform the orderly reboot from process context. */
static void reboot_work_func(struct work_struct *work)
{
	__orderly_reboot();
}

static DECLARE_WORK(reboot_work, reboot_work_func);
/**
 * orderly_reboot - Trigger an orderly system reboot
 *
 * This may be called from any context to trigger a system reboot.
 * If the orderly reboot fails, it will force an immediate reboot.
 *
 * The actual work runs from a workqueue (reboot_work); this only queues it.
 */
void orderly_reboot(void)
{
	schedule_work(&reboot_work);
}
EXPORT_SYMBOL_GPL(orderly_reboot);
/**
 * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay
 * @work: work_struct associated with the emergency poweroff function
 *
 * This function is called in very critical situations to force
 * a kernel poweroff after a configurable timeout value.
 */
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
{
	/*
	 * We have reached here after the emergency shutdown waiting period has
	 * expired. This means orderly_poweroff has not been able to shut off
	 * the system for some reason.
	 *
	 * Try to shut down the system immediately using kernel_power_off
	 * if populated
	 */
	pr_emerg("Hardware protection timed-out. Trying forced poweroff\n");
	kernel_power_off();

	/*
	 * Worst of the worst case trigger emergency restart
	 * (only reached if kernel_power_off() returned).
	 */
	pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
	emergency_restart();
}

/* Delayed work armed by hw_failure_emergency_poweroff() below. */
static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
			    hw_failure_emergency_poweroff_func);
  834. /**
  835. * hw_failure_emergency_poweroff - Trigger an emergency system poweroff
  836. *
  837. * This may be called from any critical situation to trigger a system shutdown
  838. * after a given period of time. If time is negative this is not scheduled.
  839. */
  840. static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
  841. {
  842. if (poweroff_delay_ms <= 0)
  843. return;
  844. schedule_delayed_work(&hw_failure_emergency_poweroff_work,
  845. msecs_to_jiffies(poweroff_delay_ms));
  846. }
/**
 * __hw_protection_shutdown - Trigger an emergency system shutdown or reboot
 *
 * @reason: Reason of emergency shutdown or reboot to be printed.
 * @ms_until_forced: Time to wait for orderly shutdown or reboot before
 *                   triggering it. A zero or negative value disables the
 *                   forced shutdown or reboot.
 * @shutdown: If true, indicates that a shutdown will happen
 *            after the critical temperature is reached.
 *            If false, indicates that a reboot will happen
 *            after the critical temperature is reached.
 *
 * Initiate an emergency system shutdown or reboot in order to protect
 * hardware from further damage. Usage examples include a thermal protection.
 * NOTE: The request is ignored if protection shutdown or reboot is already
 * pending even if the previous request has given a large timeout for forced
 * shutdown/reboot.
 */
void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown)
{
	/* One-shot gate: first caller wins, later calls return early. */
	static atomic_t allow_proceed = ATOMIC_INIT(1);

	pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);

	/* Shutdown should be initiated only once. */
	if (!atomic_dec_and_test(&allow_proceed))
		return;

	/*
	 * Queue a backup emergency shutdown in the event of
	 * orderly_poweroff failure
	 */
	hw_failure_emergency_poweroff(ms_until_forced);

	if (shutdown)
		orderly_poweroff(true);
	else
		orderly_reboot();
}
EXPORT_SYMBOL_GPL(__hw_protection_shutdown);
/*
 * Parse the "reboot=" kernel command line option.  The option is a
 * comma-separated list; each element selects a (panic_) reboot mode
 * (w/c/h/s/g), a reboot type letter (b/a/k/t/e/p), the CPU to reboot on
 * (s[mp]NNN), or the force flag (f).
 */
static int __init reboot_setup(char *str)
{
	for (;;) {
		enum reboot_mode *mode;

		/*
		 * Having anything passed on the command line via
		 * reboot= will cause us to disable DMI checking
		 * below.
		 */
		reboot_default = 0;

		/* A "panic_" prefix routes the setting to panic_reboot_mode. */
		if (!strncmp(str, "panic_", 6)) {
			mode = &panic_reboot_mode;
			str += 6;
		} else {
			mode = &reboot_mode;
		}

		switch (*str) {
		case 'w':
			*mode = REBOOT_WARM;
			break;

		case 'c':
			*mode = REBOOT_COLD;
			break;

		case 'h':
			*mode = REBOOT_HARD;
			break;

		case 's':
			/*
			 * reboot_cpu is s[mp]#### with #### being the processor
			 * to be used for rebooting. Skip 's' or 'smp' prefix.
			 */
			str += str[1] == 'm' && str[2] == 'p' ? 3 : 1;

			if (isdigit(str[0])) {
				int cpu = simple_strtoul(str, NULL, 0);

				/* Out-of-range CPU numbers are ignored, not fatal. */
				if (cpu >= num_possible_cpus()) {
					pr_err("Ignoring the CPU number in reboot= option. "
					       "CPU %d exceeds possible cpu number %d\n",
					       cpu, num_possible_cpus());
					break;
				}
				reboot_cpu = cpu;
			} else
				/* Bare 's' (no digits) selects REBOOT_SOFT. */
				*mode = REBOOT_SOFT;
			break;

		case 'g':
			*mode = REBOOT_GPIO;
			break;

		case 'b':
		case 'a':
		case 'k':
		case 't':
		case 'e':
		case 'p':
			/* Single-letter reboot types, stored verbatim. */
			reboot_type = *str;
			break;

		case 'f':
			reboot_force = 1;
			break;
		}

		/* Advance to the next comma-separated element, if any. */
		str = strchr(str, ',');
		if (str)
			str++;
		else
			break;
	}
	return 1;
}
__setup("reboot=", reboot_setup);
#ifdef CONFIG_SYSFS

/*
 * String values accepted/emitted by the sysfs "mode" and "type" attributes
 * below; each *_STR name mirrors the REBOOT_* / BOOT_* constant it maps to.
 */
#define REBOOT_COLD_STR		"cold"
#define REBOOT_WARM_STR		"warm"
#define REBOOT_HARD_STR		"hard"
#define REBOOT_SOFT_STR		"soft"
#define REBOOT_GPIO_STR		"gpio"
#define REBOOT_UNDEFINED_STR	"undefined"

#define BOOT_TRIPLE_STR		"triple"
#define BOOT_KBD_STR		"kbd"
#define BOOT_BIOS_STR		"bios"
#define BOOT_ACPI_STR		"acpi"
#define BOOT_EFI_STR		"efi"
#define BOOT_PCI_STR		"pci"
  964. static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
  965. {
  966. const char *val;
  967. switch (reboot_mode) {
  968. case REBOOT_COLD:
  969. val = REBOOT_COLD_STR;
  970. break;
  971. case REBOOT_WARM:
  972. val = REBOOT_WARM_STR;
  973. break;
  974. case REBOOT_HARD:
  975. val = REBOOT_HARD_STR;
  976. break;
  977. case REBOOT_SOFT:
  978. val = REBOOT_SOFT_STR;
  979. break;
  980. case REBOOT_GPIO:
  981. val = REBOOT_GPIO_STR;
  982. break;
  983. default:
  984. val = REBOOT_UNDEFINED_STR;
  985. }
  986. return sprintf(buf, "%s\n", val);
  987. }
  988. static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
  989. const char *buf, size_t count)
  990. {
  991. if (!capable(CAP_SYS_BOOT))
  992. return -EPERM;
  993. if (!strncmp(buf, REBOOT_COLD_STR, strlen(REBOOT_COLD_STR)))
  994. reboot_mode = REBOOT_COLD;
  995. else if (!strncmp(buf, REBOOT_WARM_STR, strlen(REBOOT_WARM_STR)))
  996. reboot_mode = REBOOT_WARM;
  997. else if (!strncmp(buf, REBOOT_HARD_STR, strlen(REBOOT_HARD_STR)))
  998. reboot_mode = REBOOT_HARD;
  999. else if (!strncmp(buf, REBOOT_SOFT_STR, strlen(REBOOT_SOFT_STR)))
  1000. reboot_mode = REBOOT_SOFT;
  1001. else if (!strncmp(buf, REBOOT_GPIO_STR, strlen(REBOOT_GPIO_STR)))
  1002. reboot_mode = REBOOT_GPIO;
  1003. else
  1004. return -EINVAL;
  1005. reboot_default = 0;
  1006. return count;
  1007. }
  1008. static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode);
#ifdef CONFIG_X86
/* sysfs "force" read: print reboot_force as 0/1. */
static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", reboot_force);
}

/*
 * sysfs "force" write: parse a boolean with kstrtobool() and set
 * reboot_force.  Requires CAP_SYS_BOOT; any successful write also clears
 * reboot_default.
 */
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	bool res;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* Parse errors are reported uniformly as -EINVAL. */
	if (kstrtobool(buf, &res))
		return -EINVAL;

	reboot_default = 0;
	reboot_force = res;

	return count;
}
static struct kobj_attribute reboot_force_attr = __ATTR_RW(force);
  1027. static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
  1028. {
  1029. const char *val;
  1030. switch (reboot_type) {
  1031. case BOOT_TRIPLE:
  1032. val = BOOT_TRIPLE_STR;
  1033. break;
  1034. case BOOT_KBD:
  1035. val = BOOT_KBD_STR;
  1036. break;
  1037. case BOOT_BIOS:
  1038. val = BOOT_BIOS_STR;
  1039. break;
  1040. case BOOT_ACPI:
  1041. val = BOOT_ACPI_STR;
  1042. break;
  1043. case BOOT_EFI:
  1044. val = BOOT_EFI_STR;
  1045. break;
  1046. case BOOT_CF9_FORCE:
  1047. val = BOOT_PCI_STR;
  1048. break;
  1049. default:
  1050. val = REBOOT_UNDEFINED_STR;
  1051. }
  1052. return sprintf(buf, "%s\n", val);
  1053. }
/*
 * sysfs "type" write: select the reboot type by name (prefix match against
 * the BOOT_*_STR strings, so the sysfs trailing newline is tolerated).
 * Requires CAP_SYS_BOOT; clears reboot_default on success.
 */
static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (!strncmp(buf, BOOT_TRIPLE_STR, strlen(BOOT_TRIPLE_STR)))
		reboot_type = BOOT_TRIPLE;
	else if (!strncmp(buf, BOOT_KBD_STR, strlen(BOOT_KBD_STR)))
		reboot_type = BOOT_KBD;
	else if (!strncmp(buf, BOOT_BIOS_STR, strlen(BOOT_BIOS_STR)))
		reboot_type = BOOT_BIOS;
	else if (!strncmp(buf, BOOT_ACPI_STR, strlen(BOOT_ACPI_STR)))
		reboot_type = BOOT_ACPI;
	else if (!strncmp(buf, BOOT_EFI_STR, strlen(BOOT_EFI_STR)))
		reboot_type = BOOT_EFI;
	else if (!strncmp(buf, BOOT_PCI_STR, strlen(BOOT_PCI_STR)))
		/* "pci" maps to the forced CF9 method. */
		reboot_type = BOOT_CF9_FORCE;
	else
		return -EINVAL;

	reboot_default = 0;

	return count;
}
static struct kobj_attribute reboot_type_attr = __ATTR_RW(type);
#endif
#ifdef CONFIG_SMP
/* sysfs "cpu" read: print the CPU that will be used for rebooting. */
static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", reboot_cpu);
}

/*
 * sysfs "cpu" write: set the reboot CPU.  Requires CAP_SYS_BOOT; rejects
 * values >= num_possible_cpus() with -ERANGE and propagates kstrtouint()
 * parse errors.  Clears reboot_default on success.
 */
static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int cpunum;
	int rc;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	rc = kstrtouint(buf, 0, &cpunum);
	if (rc)
		return rc;

	if (cpunum >= num_possible_cpus())
		return -ERANGE;

	reboot_default = 0;
	reboot_cpu = cpunum;

	return count;
}
static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
#endif
/*
 * Attributes registered on the "reboot" kobject created in
 * reboot_ksysfs_init(); force/type exist only on x86, cpu only on SMP.
 */
static struct attribute *reboot_attrs[] = {
	&reboot_mode_attr.attr,
#ifdef CONFIG_X86
	&reboot_force_attr.attr,
	&reboot_type_attr.attr,
#endif
#ifdef CONFIG_SMP
	&reboot_cpu_attr.attr,
#endif
	NULL,
};
#ifdef CONFIG_SYSCTL
/*
 * sysctls registered under "kernel": the poweroff_cmd path (string) and
 * the ctrl-alt-del behavior flag (int), both world-readable, root-writable.
 */
static struct ctl_table kern_reboot_table[] = {
	{
		.procname	= "poweroff_cmd",
		.data		= &poweroff_cmd,
		.maxlen		= POWEROFF_CMD_PATH_LEN,
		.mode		= 0644,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "ctrl-alt-del",
		.data		= &C_A_D,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static void __init kernel_reboot_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_reboot_table);
}
#else
/* No-op stub so reboot_ksysfs_init() can call it unconditionally. */
#define kernel_reboot_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
/* Group wrapper so all reboot attributes are created/removed together. */
static const struct attribute_group reboot_attr_group = {
	.attrs = reboot_attrs,
};
  1139. static int __init reboot_ksysfs_init(void)
  1140. {
  1141. struct kobject *reboot_kobj;
  1142. int ret;
  1143. reboot_kobj = kobject_create_and_add("reboot", kernel_kobj);
  1144. if (!reboot_kobj)
  1145. return -ENOMEM;
  1146. ret = sysfs_create_group(reboot_kobj, &reboot_attr_group);
  1147. if (ret) {
  1148. kobject_put(reboot_kobj);
  1149. return ret;
  1150. }
  1151. kernel_reboot_sysctls_init();
  1152. return 0;
  1153. }
  1154. late_initcall(reboot_ksysfs_init);
  1155. #endif