mce.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633
  1. /*
  2. * Machine check exception handling.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright 2013 IBM Corporation
  19. * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
  20. */
  21. #undef DEBUG
  22. #define pr_fmt(fmt) "mce: " fmt
  23. #include <linux/hardirq.h>
  24. #include <linux/types.h>
  25. #include <linux/ptrace.h>
  26. #include <linux/percpu.h>
  27. #include <linux/export.h>
  28. #include <linux/irq_work.h>
  29. #include <asm/machdep.h>
  30. #include <asm/mce.h>
/* Per-CPU nesting depth and buffer of in-flight machine check events. */
static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

/* irq_work used to drain the queued-event buffer once it is safe to do so. */
static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

/* irq_work that in turn schedules mce_ue_event_work below. */
static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

/* Workqueue item which processes queued UE events. */
DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
/*
 * Copy the type-specific error code from the decoded mce_error_info
 * into the matching union member of the machine_check_event, keyed by
 * error_type. Unknown types carry no extra payload.
 */
static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}
  82. /*
  83. * Decode and save high level MCE information into per cpu buffer which
  84. * is an array of machine_check_event structure.
  85. */
  86. void save_mce_event(struct pt_regs *regs, long handled,
  87. struct mce_error_info *mce_err,
  88. uint64_t nip, uint64_t addr, uint64_t phys_addr)
  89. {
  90. int index = __this_cpu_inc_return(mce_nest_count) - 1;
  91. struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
  92. /*
  93. * Return if we don't have enough space to log mce event.
  94. * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
  95. * the check below will stop buffer overrun.
  96. */
  97. if (index >= MAX_MC_EVT)
  98. return;
  99. /* Populate generic machine check info */
  100. mce->version = MCE_V1;
  101. mce->srr0 = nip;
  102. mce->srr1 = regs->msr;
  103. mce->gpr3 = regs->gpr[3];
  104. mce->in_use = 1;
  105. /* Mark it recovered if we have handled it and MSR(RI=1). */
  106. if (handled && (regs->msr & MSR_RI))
  107. mce->disposition = MCE_DISPOSITION_RECOVERED;
  108. else
  109. mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
  110. mce->initiator = mce_err->initiator;
  111. mce->severity = mce_err->severity;
  112. /*
  113. * Populate the mce error_type and type-specific error_type.
  114. */
  115. mce_set_error_info(mce, mce_err);
  116. if (!addr)
  117. return;
  118. if (mce->error_type == MCE_ERROR_TYPE_TLB) {
  119. mce->u.tlb_error.effective_address_provided = true;
  120. mce->u.tlb_error.effective_address = addr;
  121. } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
  122. mce->u.slb_error.effective_address_provided = true;
  123. mce->u.slb_error.effective_address = addr;
  124. } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
  125. mce->u.erat_error.effective_address_provided = true;
  126. mce->u.erat_error.effective_address = addr;
  127. } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
  128. mce->u.user_error.effective_address_provided = true;
  129. mce->u.user_error.effective_address = addr;
  130. } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
  131. mce->u.ra_error.effective_address_provided = true;
  132. mce->u.ra_error.effective_address = addr;
  133. } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
  134. mce->u.link_error.effective_address_provided = true;
  135. mce->u.link_error.effective_address = addr;
  136. } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
  137. mce->u.ue_error.effective_address_provided = true;
  138. mce->u.ue_error.effective_address = addr;
  139. if (phys_addr != ULONG_MAX) {
  140. mce->u.ue_error.physical_address_provided = true;
  141. mce->u.ue_error.physical_address = phys_addr;
  142. machine_check_ue_event(mce);
  143. }
  144. }
  145. return;
  146. }
/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform specific machine check
 * handle routine and in KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	/* Top of this CPU's nested-event stack (-1 when empty). */
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/*
	 * Check if we have MCE info to process. The count can exceed
	 * MAX_MC_EVT (see save_mce_event()), in which case the overflow
	 * slots were never written and there is nothing to copy.
	 */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);
	return ret;
}
/*
 * Free the most recent MCE slot on this CPU without copying the event
 * out. Pairs with a prior get_mce_event(..., false).
 */
void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
/*
 * irq_work callback: bounce UE event processing onto the workqueue
 * (mce_ue_event_work -> machine_process_ue_event).
 */
static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}
  195. /*
  196. * Queue up the MCE event which then can be handled later.
  197. */
  198. void machine_check_ue_event(struct machine_check_event *evt)
  199. {
  200. int index;
  201. index = __this_cpu_inc_return(mce_ue_count) - 1;
  202. /* If queue is full, just return for now. */
  203. if (index >= MAX_MC_EVT) {
  204. __this_cpu_dec(mce_ue_count);
  205. return;
  206. }
  207. memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
  208. /* Queue work to process this event later. */
  209. irq_work_queue(&mce_ue_event_irq_work);
  210. }
/*
 * Queue up the MCE event which then can be handled later.
 * Consumes (and releases) the current event slot via get_mce_event()
 * and copies it into this CPU's delayed-event queue.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}
/*
 * Process pending MCE UE events from this CPU's UE queue. Runs from
 * the mce_ue_event_work work queue (scheduled via irq_work), i.e. in
 * process context, draining the queue newest-first.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but
		 * oh! well
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				/* Hand the bad page over to hwpoison. */
				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from "
					"where the uncorrectable error (UE) "
					"was generated\n");
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}
/*
 * Process pending MCE events from this CPU's delayed-event queue.
 * Runs as the mce_event_process_work irq_work callback, once it is
 * safe to run non-real-mode code again.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);
		machine_check_print_event_info(evt, false);
		__this_cpu_dec(mce_queue_count);
	}
}
/*
 * Print a human-readable description of a machine check event:
 * severity, disposition, faulting NIP, initiator, error type/subtype
 * and any effective/physical addresses the event carries.
 *
 * @evt:       the event to describe (must be MCE_V1)
 * @user_mode: true if the MCE was taken from user mode; selects the
 *             PID/comm form of the NIP line instead of %pS symbol lookup
 */
void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode)
{
	const char *level, *sevstr, *subtype;
	/* Subtype name tables, indexed by the per-type error code. */
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	/* Map severity onto a log level and a severity string. */
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "";
		break;
	case MCE_SEV_ERROR_SYNC:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");

	if (user_mode) {
		printk("%s NIP: [%016llx] PID: %d Comm: %s\n", level,
		       evt->srr0, current->pid, current->comm);
	} else {
		printk("%s NIP [%016llx]: %pS\n", level, evt->srr0,
		       (void *)evt->srr0);
	}

	printk("%s Initiator: %s\n", level,
	       evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");

	/* Per-type: print subtype name plus any addresses provided. */
	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		printk("%s Error type: UE [%s]\n", level, subtype);
		if (evt->u.ue_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.ue_error.effective_address);
		if (evt->u.ue_error.physical_address_provided)
			printk("%s Physical address: %016llx\n",
			       level, evt->u.ue_error.physical_address);
		break;
	case MCE_ERROR_TYPE_SLB:
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		printk("%s Error type: SLB [%s]\n", level, subtype);
		if (evt->u.slb_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.slb_error.effective_address);
		break;
	case MCE_ERROR_TYPE_ERAT:
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		printk("%s Error type: ERAT [%s]\n", level, subtype);
		if (evt->u.erat_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.erat_error.effective_address);
		break;
	case MCE_ERROR_TYPE_TLB:
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		printk("%s Error type: TLB [%s]\n", level, subtype);
		if (evt->u.tlb_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.tlb_error.effective_address);
		break;
	case MCE_ERROR_TYPE_USER:
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		printk("%s Error type: User [%s]\n", level, subtype);
		if (evt->u.user_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.user_error.effective_address);
		break;
	case MCE_ERROR_TYPE_RA:
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		printk("%s Error type: Real address [%s]\n", level, subtype);
		if (evt->u.ra_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.ra_error.effective_address);
		break;
	case MCE_ERROR_TYPE_LINK:
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		printk("%s Error type: Link [%s]\n", level, subtype);
		if (evt->u.link_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.link_error.effective_address);
		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		printk("%s Error type: Unknown\n", level);
		break;
	}
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
  449. /*
  450. * This function is called in real mode. Strictly no printk's please.
  451. *
  452. * regs->nip and regs->msr contains srr0 and ssr1.
  453. */
  454. long machine_check_early(struct pt_regs *regs)
  455. {
  456. long handled = 0;
  457. __this_cpu_inc(irq_stat.mce_exceptions);
  458. if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
  459. handled = cur_cpu_spec->machine_check_early(regs);
  460. return handled;
  461. }
/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,		/* not determined; debug trig not handled here */
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;
/*
 * Determine at boot what the HMER_DEBUG_TRIG bit means on this CPU:
 * first from the "ibm,hmi-special-triggers" device-tree property,
 * falling back to PVR-based detection for POWER9 Nimbus revisions.
 */
static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}

__initcall(init_debug_trig_function);
/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	/* Clear the bit locally and acknowledge it in hardware. */
	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;
		break;
	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}
/*
 * Real-mode HMI entry point.
 * Return values:
 *  0 or 1 when hmi_handle_debugtrig() recognised the cause (its own
 *  return value is passed through); otherwise 1 after invoking the
 *  platform early handler and waiting for timebase resync.
 */
long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	__this_cpu_inc(irq_stat.hmi_exceptions);

	/* Debug-trigger causes are handled entirely here. */
	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}