traps.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158
/*
 * linux/arch/m68k/kernel/traps.c
 *
 * Copyright (C) 1993, 1994 by Hamish Macdonald
 *
 * 68040 fixes by Michael Rausch
 * 68040 fixes by Martin Apel
 * 68040 fixes and writeback by Richard Zidlicky
 * 68060 fixes by Roman Hodek
 * 68060 fixes by Jesper Skov
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Sets up all exception vectors
 */
  19. #include <linux/sched.h>
  20. #include <linux/sched/debug.h>
  21. #include <linux/signal.h>
  22. #include <linux/kernel.h>
  23. #include <linux/mm.h>
  24. #include <linux/module.h>
  25. #include <linux/user.h>
  26. #include <linux/string.h>
  27. #include <linux/linkage.h>
  28. #include <linux/init.h>
  29. #include <linux/ptrace.h>
  30. #include <linux/kallsyms.h>
  31. #include <linux/extable.h>
  32. #include <asm/setup.h>
  33. #include <asm/fpu.h>
  34. #include <linux/uaccess.h>
  35. #include <asm/traps.h>
  36. #include <asm/machdep.h>
  37. #include <asm/processor.h>
  38. #include <asm/siginfo.h>
  39. #include <asm/tlbflush.h>
  40. #include "traps.h"
  41. #include "../mm/fault.h"
  42. static const char *vec_names[] = {
  43. [VEC_RESETSP] = "RESET SP",
  44. [VEC_RESETPC] = "RESET PC",
  45. [VEC_BUSERR] = "BUS ERROR",
  46. [VEC_ADDRERR] = "ADDRESS ERROR",
  47. [VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
  48. [VEC_ZERODIV] = "ZERO DIVIDE",
  49. [VEC_CHK] = "CHK",
  50. [VEC_TRAP] = "TRAPcc",
  51. [VEC_PRIV] = "PRIVILEGE VIOLATION",
  52. [VEC_TRACE] = "TRACE",
  53. [VEC_LINE10] = "LINE 1010",
  54. [VEC_LINE11] = "LINE 1111",
  55. [VEC_RESV12] = "UNASSIGNED RESERVED 12",
  56. [VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
  57. [VEC_FORMAT] = "FORMAT ERROR",
  58. [VEC_UNINT] = "UNINITIALIZED INTERRUPT",
  59. [VEC_RESV16] = "UNASSIGNED RESERVED 16",
  60. [VEC_RESV17] = "UNASSIGNED RESERVED 17",
  61. [VEC_RESV18] = "UNASSIGNED RESERVED 18",
  62. [VEC_RESV19] = "UNASSIGNED RESERVED 19",
  63. [VEC_RESV20] = "UNASSIGNED RESERVED 20",
  64. [VEC_RESV21] = "UNASSIGNED RESERVED 21",
  65. [VEC_RESV22] = "UNASSIGNED RESERVED 22",
  66. [VEC_RESV23] = "UNASSIGNED RESERVED 23",
  67. [VEC_SPUR] = "SPURIOUS INTERRUPT",
  68. [VEC_INT1] = "LEVEL 1 INT",
  69. [VEC_INT2] = "LEVEL 2 INT",
  70. [VEC_INT3] = "LEVEL 3 INT",
  71. [VEC_INT4] = "LEVEL 4 INT",
  72. [VEC_INT5] = "LEVEL 5 INT",
  73. [VEC_INT6] = "LEVEL 6 INT",
  74. [VEC_INT7] = "LEVEL 7 INT",
  75. [VEC_SYS] = "SYSCALL",
  76. [VEC_TRAP1] = "TRAP #1",
  77. [VEC_TRAP2] = "TRAP #2",
  78. [VEC_TRAP3] = "TRAP #3",
  79. [VEC_TRAP4] = "TRAP #4",
  80. [VEC_TRAP5] = "TRAP #5",
  81. [VEC_TRAP6] = "TRAP #6",
  82. [VEC_TRAP7] = "TRAP #7",
  83. [VEC_TRAP8] = "TRAP #8",
  84. [VEC_TRAP9] = "TRAP #9",
  85. [VEC_TRAP10] = "TRAP #10",
  86. [VEC_TRAP11] = "TRAP #11",
  87. [VEC_TRAP12] = "TRAP #12",
  88. [VEC_TRAP13] = "TRAP #13",
  89. [VEC_TRAP14] = "TRAP #14",
  90. [VEC_TRAP15] = "TRAP #15",
  91. [VEC_FPBRUC] = "FPCP BSUN",
  92. [VEC_FPIR] = "FPCP INEXACT",
  93. [VEC_FPDIVZ] = "FPCP DIV BY 0",
  94. [VEC_FPUNDER] = "FPCP UNDERFLOW",
  95. [VEC_FPOE] = "FPCP OPERAND ERROR",
  96. [VEC_FPOVER] = "FPCP OVERFLOW",
  97. [VEC_FPNAN] = "FPCP SNAN",
  98. [VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
  99. [VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
  100. [VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
  101. [VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
  102. [VEC_RESV59] = "UNASSIGNED RESERVED 59",
  103. [VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
  104. [VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
  105. [VEC_RESV62] = "UNASSIGNED RESERVED 62",
  106. [VEC_RESV63] = "UNASSIGNED RESERVED 63",
  107. };
  108. static const char *space_names[] = {
  109. [0] = "Space 0",
  110. [USER_DATA] = "User Data",
  111. [USER_PROGRAM] = "User Program",
  112. #ifndef CONFIG_SUN3
  113. [3] = "Space 3",
  114. #else
  115. [FC_CONTROL] = "Control",
  116. #endif
  117. [4] = "Space 4",
  118. [SUPER_DATA] = "Super Data",
  119. [SUPER_PROGRAM] = "Super Program",
  120. [CPU_SPACE] = "CPU"
  121. };
  122. void die_if_kernel(char *,struct pt_regs *,int);
  123. asmlinkage void trap_c(struct frame *fp);
  124. #if defined (CONFIG_M68060)
  125. static inline void access_error060 (struct frame *fp)
  126. {
  127. unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
  128. pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
  129. if (fslw & MMU060_BPE) {
  130. /* branch prediction error -> clear branch cache */
  131. __asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
  132. "orl #0x00400000,%/d0\n\t"
  133. "movec %/d0,%/cacr"
  134. : : : "d0" );
  135. /* return if there's no other error */
  136. if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
  137. return;
  138. }
  139. if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
  140. unsigned long errorcode;
  141. unsigned long addr = fp->un.fmt4.effaddr;
  142. if (fslw & MMU060_MA)
  143. addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
  144. errorcode = 1;
  145. if (fslw & MMU060_DESC_ERR) {
  146. __flush_tlb040_one(addr);
  147. errorcode = 0;
  148. }
  149. if (fslw & MMU060_W)
  150. errorcode |= 2;
  151. pr_debug("errorcode = %ld\n", errorcode);
  152. do_page_fault(&fp->ptregs, addr, errorcode);
  153. } else if (fslw & (MMU060_SEE)){
  154. /* Software Emulation Error.
  155. * fault during mem_read/mem_write in ifpsp060/os.S
  156. */
  157. send_fault_sig(&fp->ptregs);
  158. } else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
  159. send_fault_sig(&fp->ptregs) > 0) {
  160. pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc,
  161. fp->un.fmt4.effaddr);
  162. pr_err("68060 access error, fslw=%lx\n", fslw);
  163. trap_c( fp );
  164. }
  165. }
  166. #endif /* CONFIG_M68060 */
  167. #if defined (CONFIG_M68040)
  168. static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
  169. {
  170. unsigned long mmusr;
  171. set_fc(wbs);
  172. if (iswrite)
  173. asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
  174. else
  175. asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
  176. asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
  177. set_fc(USER_DATA);
  178. return mmusr;
  179. }
  180. static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
  181. unsigned long wbd)
  182. {
  183. int res = 0;
  184. set_fc(wbs);
  185. switch (wbs & WBSIZ_040) {
  186. case BA_SIZE_BYTE:
  187. res = put_user(wbd & 0xff, (char __user *)wba);
  188. break;
  189. case BA_SIZE_WORD:
  190. res = put_user(wbd & 0xffff, (short __user *)wba);
  191. break;
  192. case BA_SIZE_LONG:
  193. res = put_user(wbd, (int __user *)wba);
  194. break;
  195. }
  196. set_fc(USER_DATA);
  197. pr_debug("do_040writeback1, res=%d\n", res);
  198. return res;
  199. }
  200. /* after an exception in a writeback the stack frame corresponding
  201. * to that exception is discarded, set a few bits in the old frame
  202. * to simulate what it should look like
  203. */
  204. static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
  205. {
  206. fp->un.fmt7.faddr = wba;
  207. fp->un.fmt7.ssw = wbs & 0xff;
  208. if (wba != current->thread.faddr)
  209. fp->un.fmt7.ssw |= MA_040;
  210. }
  211. static inline void do_040writebacks(struct frame *fp)
  212. {
  213. int res = 0;
  214. #if 0
  215. if (fp->un.fmt7.wb1s & WBV_040)
  216. pr_err("access_error040: cannot handle 1st writeback. oops.\n");
  217. #endif
  218. if ((fp->un.fmt7.wb2s & WBV_040) &&
  219. !(fp->un.fmt7.wb2s & WBTT_040)) {
  220. res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
  221. fp->un.fmt7.wb2d);
  222. if (res)
  223. fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
  224. else
  225. fp->un.fmt7.wb2s = 0;
  226. }
  227. /* do the 2nd wb only if the first one was successful (except for a kernel wb) */
  228. if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
  229. res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
  230. fp->un.fmt7.wb3d);
  231. if (res)
  232. {
  233. fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
  234. fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
  235. fp->un.fmt7.wb3s &= (~WBV_040);
  236. fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
  237. fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
  238. }
  239. else
  240. fp->un.fmt7.wb3s = 0;
  241. }
  242. if (res)
  243. send_fault_sig(&fp->ptregs);
  244. }
  245. /*
  246. * called from sigreturn(), must ensure userspace code didn't
  247. * manipulate exception frame to circumvent protection, then complete
  248. * pending writebacks
  249. * we just clear TM2 to turn it into a userspace access
  250. */
  251. asmlinkage void berr_040cleanup(struct frame *fp)
  252. {
  253. fp->un.fmt7.wb2s &= ~4;
  254. fp->un.fmt7.wb3s &= ~4;
  255. do_040writebacks(fp);
  256. }
  257. static inline void access_error040(struct frame *fp)
  258. {
  259. unsigned short ssw = fp->un.fmt7.ssw;
  260. unsigned long mmusr;
  261. pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
  262. pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
  263. fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
  264. pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
  265. fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
  266. fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
  267. if (ssw & ATC_040) {
  268. unsigned long addr = fp->un.fmt7.faddr;
  269. unsigned long errorcode;
  270. /*
  271. * The MMU status has to be determined AFTER the address
  272. * has been corrected if there was a misaligned access (MA).
  273. */
  274. if (ssw & MA_040)
  275. addr = (addr + 7) & -8;
  276. /* MMU error, get the MMUSR info for this access */
  277. mmusr = probe040(!(ssw & RW_040), addr, ssw);
  278. pr_debug("mmusr = %lx\n", mmusr);
  279. errorcode = 1;
  280. if (!(mmusr & MMU_R_040)) {
  281. /* clear the invalid atc entry */
  282. __flush_tlb040_one(addr);
  283. errorcode = 0;
  284. }
  285. /* despite what documentation seems to say, RMW
  286. * accesses have always both the LK and RW bits set */
  287. if (!(ssw & RW_040) || (ssw & LK_040))
  288. errorcode |= 2;
  289. if (do_page_fault(&fp->ptregs, addr, errorcode)) {
  290. pr_debug("do_page_fault() !=0\n");
  291. if (user_mode(&fp->ptregs)){
  292. /* delay writebacks after signal delivery */
  293. pr_debug(".. was usermode - return\n");
  294. return;
  295. }
  296. /* disable writeback into user space from kernel
  297. * (if do_page_fault didn't fix the mapping,
  298. * the writeback won't do good)
  299. */
  300. disable_wb:
  301. pr_debug(".. disabling wb2\n");
  302. if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
  303. fp->un.fmt7.wb2s &= ~WBV_040;
  304. if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
  305. fp->un.fmt7.wb3s &= ~WBV_040;
  306. }
  307. } else {
  308. /* In case of a bus error we either kill the process or expect
  309. * the kernel to catch the fault, which then is also responsible
  310. * for cleaning up the mess.
  311. */
  312. current->thread.signo = SIGBUS;
  313. current->thread.faddr = fp->un.fmt7.faddr;
  314. if (send_fault_sig(&fp->ptregs) >= 0)
  315. pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
  316. fp->un.fmt7.faddr);
  317. goto disable_wb;
  318. }
  319. do_040writebacks(fp);
  320. }
  321. #endif /* CONFIG_M68040 */
  322. #if defined(CONFIG_SUN3)
  323. #include <asm/sun3mmu.h>
  324. #include "../sun3/sun3.h"
  325. /* sun3 version of bus_error030 */
  326. static inline void bus_error030 (struct frame *fp)
  327. {
  328. unsigned char buserr_type = sun3_get_buserr ();
  329. unsigned long addr, errorcode;
  330. unsigned short ssw = fp->un.fmtb.ssw;
  331. extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
  332. if (ssw & (FC | FB))
  333. pr_debug("Instruction fault at %#010lx\n",
  334. ssw & FC ?
  335. fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
  336. :
  337. fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
  338. if (ssw & DF)
  339. pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
  340. ssw & RW ? "read" : "write",
  341. fp->un.fmtb.daddr,
  342. space_names[ssw & DFC], fp->ptregs.pc);
  343. /*
  344. * Check if this page should be demand-mapped. This needs to go before
  345. * the testing for a bad kernel-space access (demand-mapping applies
  346. * to kernel accesses too).
  347. */
  348. if ((ssw & DF)
  349. && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
  350. if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
  351. return;
  352. }
  353. /* Check for kernel-space pagefault (BAD). */
  354. if (fp->ptregs.sr & PS_S) {
  355. /* kernel fault must be a data fault to user space */
  356. if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
  357. // try checking the kernel mappings before surrender
  358. if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
  359. return;
  360. /* instruction fault or kernel data fault! */
  361. if (ssw & (FC | FB))
  362. pr_err("Instruction fault at %#010lx\n",
  363. fp->ptregs.pc);
  364. if (ssw & DF) {
  365. /* was this fault incurred testing bus mappings? */
  366. if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
  367. (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
  368. send_fault_sig(&fp->ptregs);
  369. return;
  370. }
  371. pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
  372. ssw & RW ? "read" : "write",
  373. fp->un.fmtb.daddr,
  374. space_names[ssw & DFC], fp->ptregs.pc);
  375. }
  376. pr_err("BAD KERNEL BUSERR\n");
  377. die_if_kernel("Oops", &fp->ptregs,0);
  378. force_sig(SIGKILL);
  379. return;
  380. }
  381. } else {
  382. /* user fault */
  383. if (!(ssw & (FC | FB)) && !(ssw & DF))
  384. /* not an instruction fault or data fault! BAD */
  385. panic ("USER BUSERR w/o instruction or data fault");
  386. }
  387. /* First handle the data fault, if any. */
  388. if (ssw & DF) {
  389. addr = fp->un.fmtb.daddr;
  390. // errorcode bit 0: 0 -> no page 1 -> protection fault
  391. // errorcode bit 1: 0 -> read fault 1 -> write fault
  392. // (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
  393. // (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
  394. if (buserr_type & SUN3_BUSERR_PROTERR)
  395. errorcode = 0x01;
  396. else if (buserr_type & SUN3_BUSERR_INVALID)
  397. errorcode = 0x00;
  398. else {
  399. pr_debug("*** unexpected busfault type=%#04x\n",
  400. buserr_type);
  401. pr_debug("invalid %s access at %#lx from pc %#lx\n",
  402. !(ssw & RW) ? "write" : "read", addr,
  403. fp->ptregs.pc);
  404. die_if_kernel ("Oops", &fp->ptregs, buserr_type);
  405. force_sig (SIGBUS);
  406. return;
  407. }
  408. //todo: wtf is RM bit? --m
  409. if (!(ssw & RW) || ssw & RM)
  410. errorcode |= 0x02;
  411. /* Handle page fault. */
  412. do_page_fault (&fp->ptregs, addr, errorcode);
  413. /* Retry the data fault now. */
  414. return;
  415. }
  416. /* Now handle the instruction fault. */
  417. /* Get the fault address. */
  418. if (fp->ptregs.format == 0xA)
  419. addr = fp->ptregs.pc + 4;
  420. else
  421. addr = fp->un.fmtb.baddr;
  422. if (ssw & FC)
  423. addr -= 2;
  424. if (buserr_type & SUN3_BUSERR_INVALID) {
  425. if (!mmu_emu_handle_fault(addr, 1, 0))
  426. do_page_fault (&fp->ptregs, addr, 0);
  427. } else {
  428. pr_debug("protection fault on insn access (segv).\n");
  429. force_sig (SIGSEGV);
  430. }
  431. }
  432. #else
  433. #if defined(CPU_M68020_OR_M68030)
  434. static inline void bus_error030 (struct frame *fp)
  435. {
  436. volatile unsigned short temp;
  437. unsigned short mmusr;
  438. unsigned long addr, errorcode;
  439. unsigned short ssw = fp->un.fmtb.ssw;
  440. #ifdef DEBUG
  441. unsigned long desc;
  442. #endif
  443. pr_debug("pid = %x ", current->pid);
  444. pr_debug("SSW=%#06x ", ssw);
  445. if (ssw & (FC | FB))
  446. pr_debug("Instruction fault at %#010lx\n",
  447. ssw & FC ?
  448. fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
  449. :
  450. fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
  451. if (ssw & DF)
  452. pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
  453. ssw & RW ? "read" : "write",
  454. fp->un.fmtb.daddr,
  455. space_names[ssw & DFC], fp->ptregs.pc);
  456. /* ++andreas: If a data fault and an instruction fault happen
  457. at the same time map in both pages. */
  458. /* First handle the data fault, if any. */
  459. if (ssw & DF) {
  460. addr = fp->un.fmtb.daddr;
  461. #ifdef DEBUG
  462. asm volatile ("ptestr %3,%2@,#7,%0\n\t"
  463. "pmove %%psr,%1"
  464. : "=a&" (desc), "=m" (temp)
  465. : "a" (addr), "d" (ssw));
  466. pr_debug("mmusr is %#x for addr %#lx in task %p\n",
  467. temp, addr, current);
  468. pr_debug("descriptor address is 0x%p, contents %#lx\n",
  469. __va(desc), *(unsigned long *)__va(desc));
  470. #else
  471. asm volatile ("ptestr %2,%1@,#7\n\t"
  472. "pmove %%psr,%0"
  473. : "=m" (temp) : "a" (addr), "d" (ssw));
  474. #endif
  475. mmusr = temp;
  476. errorcode = (mmusr & MMU_I) ? 0 : 1;
  477. if (!(ssw & RW) || (ssw & RM))
  478. errorcode |= 2;
  479. if (mmusr & (MMU_I | MMU_WP)) {
  480. /* We might have an exception table for this PC */
  481. if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
  482. pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
  483. ssw & RW ? "read" : "write",
  484. fp->un.fmtb.daddr,
  485. space_names[ssw & DFC], fp->ptregs.pc);
  486. goto buserr;
  487. }
  488. /* Don't try to do anything further if an exception was
  489. handled. */
  490. if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
  491. return;
  492. } else if (!(mmusr & MMU_I)) {
  493. /* probably a 020 cas fault */
  494. if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
  495. pr_err("unexpected bus error (%#x,%#x)\n", ssw,
  496. mmusr);
  497. } else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
  498. pr_err("invalid %s access at %#lx from pc %#lx\n",
  499. !(ssw & RW) ? "write" : "read", addr,
  500. fp->ptregs.pc);
  501. die_if_kernel("Oops",&fp->ptregs,mmusr);
  502. force_sig(SIGSEGV);
  503. return;
  504. } else {
  505. #if 0
  506. static volatile long tlong;
  507. #endif
  508. pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
  509. !(ssw & RW) ? "write" : "read", addr,
  510. fp->ptregs.pc, ssw);
  511. asm volatile ("ptestr #1,%1@,#0\n\t"
  512. "pmove %%psr,%0"
  513. : "=m" (temp)
  514. : "a" (addr));
  515. mmusr = temp;
  516. pr_err("level 0 mmusr is %#x\n", mmusr);
  517. #if 0
  518. asm volatile ("pmove %%tt0,%0"
  519. : "=m" (tlong));
  520. pr_debug("tt0 is %#lx, ", tlong);
  521. asm volatile ("pmove %%tt1,%0"
  522. : "=m" (tlong));
  523. pr_debug("tt1 is %#lx\n", tlong);
  524. #endif
  525. pr_debug("Unknown SIGSEGV - 1\n");
  526. die_if_kernel("Oops",&fp->ptregs,mmusr);
  527. force_sig(SIGSEGV);
  528. return;
  529. }
  530. /* setup an ATC entry for the access about to be retried */
  531. if (!(ssw & RW) || (ssw & RM))
  532. asm volatile ("ploadw %1,%0@" : /* no outputs */
  533. : "a" (addr), "d" (ssw));
  534. else
  535. asm volatile ("ploadr %1,%0@" : /* no outputs */
  536. : "a" (addr), "d" (ssw));
  537. }
  538. /* Now handle the instruction fault. */
  539. if (!(ssw & (FC|FB)))
  540. return;
  541. if (fp->ptregs.sr & PS_S) {
  542. pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc);
  543. buserr:
  544. pr_err("BAD KERNEL BUSERR\n");
  545. die_if_kernel("Oops",&fp->ptregs,0);
  546. force_sig(SIGKILL);
  547. return;
  548. }
  549. /* get the fault address */
  550. if (fp->ptregs.format == 10)
  551. addr = fp->ptregs.pc + 4;
  552. else
  553. addr = fp->un.fmtb.baddr;
  554. if (ssw & FC)
  555. addr -= 2;
  556. if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
  557. /* Insn fault on same page as data fault. But we
  558. should still create the ATC entry. */
  559. goto create_atc_entry;
  560. #ifdef DEBUG
  561. asm volatile ("ptestr #1,%2@,#7,%0\n\t"
  562. "pmove %%psr,%1"
  563. : "=a&" (desc), "=m" (temp)
  564. : "a" (addr));
  565. pr_debug("mmusr is %#x for addr %#lx in task %p\n",
  566. temp, addr, current);
  567. pr_debug("descriptor address is 0x%p, contents %#lx\n",
  568. __va(desc), *(unsigned long *)__va(desc));
  569. #else
  570. asm volatile ("ptestr #1,%1@,#7\n\t"
  571. "pmove %%psr,%0"
  572. : "=m" (temp) : "a" (addr));
  573. #endif
  574. mmusr = temp;
  575. if (mmusr & MMU_I)
  576. do_page_fault (&fp->ptregs, addr, 0);
  577. else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
  578. pr_err("invalid insn access at %#lx from pc %#lx\n",
  579. addr, fp->ptregs.pc);
  580. pr_debug("Unknown SIGSEGV - 2\n");
  581. die_if_kernel("Oops",&fp->ptregs,mmusr);
  582. force_sig(SIGSEGV);
  583. return;
  584. }
  585. create_atc_entry:
  586. /* setup an ATC entry for the access about to be retried */
  587. asm volatile ("ploadr #2,%0@" : /* no outputs */
  588. : "a" (addr));
  589. }
  590. #endif /* CPU_M68020_OR_M68030 */
  591. #endif /* !CONFIG_SUN3 */
  592. #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
  593. #include <asm/mcfmmu.h>
  594. /*
  595. * The following table converts the FS encoding of a ColdFire
  596. * exception stack frame into the error_code value needed by
  597. * do_fault.
  598. */
  599. static const unsigned char fs_err_code[] = {
  600. 0, /* 0000 */
  601. 0, /* 0001 */
  602. 0, /* 0010 */
  603. 0, /* 0011 */
  604. 1, /* 0100 */
  605. 0, /* 0101 */
  606. 0, /* 0110 */
  607. 0, /* 0111 */
  608. 2, /* 1000 */
  609. 3, /* 1001 */
  610. 2, /* 1010 */
  611. 0, /* 1011 */
  612. 1, /* 1100 */
  613. 1, /* 1101 */
  614. 0, /* 1110 */
  615. 0 /* 1111 */
  616. };
  617. static inline void access_errorcf(unsigned int fs, struct frame *fp)
  618. {
  619. unsigned long mmusr, addr;
  620. unsigned int err_code;
  621. int need_page_fault;
  622. mmusr = mmu_read(MMUSR);
  623. addr = mmu_read(MMUAR);
  624. /*
  625. * error_code:
  626. * bit 0 == 0 means no page found, 1 means protection fault
  627. * bit 1 == 0 means read, 1 means write
  628. */
  629. switch (fs) {
  630. case 5: /* 0101 TLB opword X miss */
  631. need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
  632. addr = fp->ptregs.pc;
  633. break;
  634. case 6: /* 0110 TLB extension word X miss */
  635. need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
  636. addr = fp->ptregs.pc + sizeof(long);
  637. break;
  638. case 10: /* 1010 TLB W miss */
  639. need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
  640. break;
  641. case 14: /* 1110 TLB R miss */
  642. need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
  643. break;
  644. default:
  645. /* 0000 Normal */
  646. /* 0001 Reserved */
  647. /* 0010 Interrupt during debug service routine */
  648. /* 0011 Reserved */
  649. /* 0100 X Protection */
  650. /* 0111 IFP in emulator mode */
  651. /* 1000 W Protection*/
  652. /* 1001 Write error*/
  653. /* 1011 Reserved*/
  654. /* 1100 R Protection*/
  655. /* 1101 R Protection*/
  656. /* 1111 OEP in emulator mode*/
  657. need_page_fault = 1;
  658. break;
  659. }
  660. if (need_page_fault) {
  661. err_code = fs_err_code[fs];
  662. if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
  663. err_code |= 2; /* bit1 - write, bit0 - protection */
  664. do_page_fault(&fp->ptregs, addr, err_code);
  665. }
  666. }
  667. #endif /* CONFIG_COLDFIRE CONFIG_MMU */
  668. asmlinkage void buserr_c(struct frame *fp)
  669. {
  670. /* Only set esp0 if coming from user mode */
  671. if (user_mode(&fp->ptregs))
  672. current->thread.esp0 = (unsigned long) fp;
  673. pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format);
  674. #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
  675. if (CPU_IS_COLDFIRE) {
  676. unsigned int fs;
  677. fs = (fp->ptregs.vector & 0x3) |
  678. ((fp->ptregs.vector & 0xc00) >> 8);
  679. switch (fs) {
  680. case 0x5:
  681. case 0x6:
  682. case 0x7:
  683. case 0x9:
  684. case 0xa:
  685. case 0xd:
  686. case 0xe:
  687. case 0xf:
  688. access_errorcf(fs, fp);
  689. return;
  690. default:
  691. break;
  692. }
  693. }
  694. #endif /* CONFIG_COLDFIRE && CONFIG_MMU */
  695. switch (fp->ptregs.format) {
  696. #if defined (CONFIG_M68060)
  697. case 4: /* 68060 access error */
  698. access_error060 (fp);
  699. break;
  700. #endif
  701. #if defined (CONFIG_M68040)
  702. case 0x7: /* 68040 access error */
  703. access_error040 (fp);
  704. break;
  705. #endif
  706. #if defined (CPU_M68020_OR_M68030)
  707. case 0xa:
  708. case 0xb:
  709. bus_error030 (fp);
  710. break;
  711. #endif
  712. default:
  713. die_if_kernel("bad frame format",&fp->ptregs,0);
  714. pr_debug("Unknown SIGSEGV - 4\n");
  715. force_sig(SIGSEGV);
  716. }
  717. }
  718. static int kstack_depth_to_print = 48;
  719. static void show_trace(unsigned long *stack, const char *loglvl)
  720. {
  721. unsigned long *endstack;
  722. unsigned long addr;
  723. int i;
  724. printk("%sCall Trace:", loglvl);
  725. addr = (unsigned long)stack + THREAD_SIZE - 1;
  726. endstack = (unsigned long *)(addr & -THREAD_SIZE);
  727. i = 0;
  728. while (stack + 1 <= endstack) {
  729. addr = *stack++;
  730. /*
  731. * If the address is either in the text segment of the
  732. * kernel, or in the region which contains vmalloc'ed
  733. * memory, it *may* be the address of a calling
  734. * routine; if so, print it so that someone tracing
  735. * down the cause of the crash will be able to figure
  736. * out the call path that was taken.
  737. */
  738. if (__kernel_text_address(addr)) {
  739. #ifndef CONFIG_KALLSYMS
  740. if (i % 5 == 0)
  741. pr_cont("\n ");
  742. #endif
  743. pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
  744. i++;
  745. }
  746. }
  747. pr_cont("\n");
  748. }
  749. void show_registers(struct pt_regs *regs)
  750. {
  751. struct frame *fp = (struct frame *)regs;
  752. u16 c, *cp;
  753. unsigned long addr;
  754. int i;
  755. print_modules();
  756. pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
  757. pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
  758. pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
  759. regs->d0, regs->d1, regs->d2, regs->d3);
  760. pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
  761. regs->d4, regs->d5, regs->a0, regs->a1);
  762. pr_info("Process %s (pid: %d, task=%p)\n",
  763. current->comm, task_pid_nr(current), current);
  764. addr = (unsigned long)&fp->un;
  765. pr_info("Frame format=%X ", regs->format);
  766. switch (regs->format) {
  767. case 0x2:
  768. pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr);
  769. addr += sizeof(fp->un.fmt2);
  770. break;
  771. case 0x3:
  772. pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr);
  773. addr += sizeof(fp->un.fmt3);
  774. break;
  775. case 0x4:
  776. if (CPU_IS_060)
  777. pr_cont("fault addr=%08lx fslw=%08lx\n",
  778. fp->un.fmt4.effaddr, fp->un.fmt4.pc);
  779. else
  780. pr_cont("eff addr=%08lx pc=%08lx\n",
  781. fp->un.fmt4.effaddr, fp->un.fmt4.pc);
  782. addr += sizeof(fp->un.fmt4);
  783. break;
  784. case 0x7:
  785. pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n",
  786. fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
  787. pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n",
  788. fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
  789. pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n",
  790. fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
  791. pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n",
  792. fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
  793. pr_info("push data: %08lx %08lx %08lx %08lx\n",
  794. fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
  795. fp->un.fmt7.pd3);
  796. addr += sizeof(fp->un.fmt7);
  797. break;
  798. case 0x9:
  799. pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr);
  800. addr += sizeof(fp->un.fmt9);
  801. break;
  802. case 0xa:
  803. pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
  804. fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
  805. fp->un.fmta.daddr, fp->un.fmta.dobuf);
  806. addr += sizeof(fp->un.fmta);
  807. break;
  808. case 0xb:
  809. pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
  810. fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
  811. fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
  812. pr_info("baddr=%08lx dibuf=%08lx ver=%x\n",
  813. fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
  814. addr += sizeof(fp->un.fmtb);
  815. break;
  816. default:
  817. pr_cont("\n");
  818. }
  819. show_stack(NULL, (unsigned long *)addr, KERN_INFO);
  820. pr_info("Code:");
  821. cp = (u16 *)regs->pc;
  822. for (i = -8; i < 16; i++) {
  823. if (get_kernel_nofault(c, cp + i) && i >= 0) {
  824. pr_cont(" Bad PC value.");
  825. break;
  826. }
  827. if (i)
  828. pr_cont(" %04x", c);
  829. else
  830. pr_cont(" <%04x>", c);
  831. }
  832. pr_cont("\n");
  833. }
  834. void show_stack(struct task_struct *task, unsigned long *stack,
  835. const char *loglvl)
  836. {
  837. unsigned long *p;
  838. unsigned long *endstack;
  839. int i;
  840. if (!stack) {
  841. if (task)
  842. stack = (unsigned long *)task->thread.esp0;
  843. else
  844. stack = (unsigned long *)&stack;
  845. }
  846. endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
  847. printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
  848. p = stack;
  849. for (i = 0; i < kstack_depth_to_print; i++) {
  850. if (p + 1 > endstack)
  851. break;
  852. if (i % 8 == 0)
  853. pr_cont("\n ");
  854. pr_cont(" %08lx", *p++);
  855. }
  856. pr_cont("\n");
  857. show_trace(stack, loglvl);
  858. }
  859. /*
  860. * The vector number returned in the frame pointer may also contain
  861. * the "fs" (Fault Status) bits on ColdFire. These are in the bottom
  862. * 2 bits, and upper 2 bits. So we need to mask out the real vector
  863. * number before using it in comparisons. You don't need to do this on
  864. * real 68k parts, but it won't hurt either.
  865. */
  866. static void bad_super_trap(struct frame *fp)
  867. {
  868. int vector = (fp->ptregs.vector >> 2) & 0xff;
  869. console_verbose();
  870. if (vector < ARRAY_SIZE(vec_names))
  871. pr_err("*** %s *** FORMAT=%X\n",
  872. vec_names[vector],
  873. fp->ptregs.format);
  874. else
  875. pr_err("*** Exception %d *** FORMAT=%X\n",
  876. vector, fp->ptregs.format);
  877. if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) {
  878. unsigned short ssw = fp->un.fmtb.ssw;
  879. pr_err("SSW=%#06x ", ssw);
  880. if (ssw & RC)
  881. pr_err("Pipe stage C instruction fault at %#010lx\n",
  882. (fp->ptregs.format) == 0xA ?
  883. fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
  884. if (ssw & RB)
  885. pr_err("Pipe stage B instruction fault at %#010lx\n",
  886. (fp->ptregs.format) == 0xA ?
  887. fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
  888. if (ssw & DF)
  889. pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
  890. ssw & RW ? "read" : "write",
  891. fp->un.fmtb.daddr, space_names[ssw & DFC],
  892. fp->ptregs.pc);
  893. }
  894. pr_err("Current process id is %d\n", task_pid_nr(current));
  895. die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
  896. }
/*
 * trap_c() - common C-level handler for processor exceptions.
 * @fp: exception frame pushed by the low-level entry code.
 *
 * Supervisor-mode faults are either resolved via the exception fixup
 * tables or reported through bad_super_trap().  User-mode faults are
 * translated into a (signal, si_code) pair and delivered to the
 * current task, with the fault address taken from the format-specific
 * part of the exception frame.
 */
asmlinkage void trap_c(struct frame *fp)
{
	int sig, si_code;
	void __user *addr;
	/* Mask off ColdFire fault-status bits to get the real vector. */
	int vector = (fp->ptregs.vector >> 2) & 0xff;

	if (fp->ptregs.sr & PS_S) {
		if (vector == VEC_TRACE) {
			/* traced a trapping instruction on a 68020/30,
			 * real exception will be executed afterwards.
			 */
			return;
		}
#ifdef CONFIG_MMU
		/* Kernel fault covered by an exception-table entry? */
		if (fixup_exception(&fp->ptregs))
			return;
#endif
		bad_super_trap(fp);
		return;
	}

	/* send the appropriate signal to the user program */
	switch (vector) {
	case VEC_ADDRERR:
		si_code = BUS_ADRALN;
		sig = SIGBUS;
		break;
	case VEC_ILLEGAL:
	case VEC_LINE10:
	case VEC_LINE11:
		si_code = ILL_ILLOPC;
		sig = SIGILL;
		break;
	case VEC_PRIV:
		si_code = ILL_PRVOPC;
		sig = SIGILL;
		break;
	case VEC_COPROC:
		si_code = ILL_COPROC;
		sig = SIGILL;
		break;
	/* TRAP #1..#14 are unassigned; TRAP #0 and #15 handled below. */
	case VEC_TRAP1:
	case VEC_TRAP2:
	case VEC_TRAP3:
	case VEC_TRAP4:
	case VEC_TRAP5:
	case VEC_TRAP6:
	case VEC_TRAP7:
	case VEC_TRAP8:
	case VEC_TRAP9:
	case VEC_TRAP10:
	case VEC_TRAP11:
	case VEC_TRAP12:
	case VEC_TRAP13:
	case VEC_TRAP14:
		si_code = ILL_ILLTRP;
		sig = SIGILL;
		break;
	case VEC_FPBRUC:
	case VEC_FPOE:
	case VEC_FPNAN:
		si_code = FPE_FLTINV;
		sig = SIGFPE;
		break;
	case VEC_FPIR:
		si_code = FPE_FLTRES;
		sig = SIGFPE;
		break;
	case VEC_FPDIVZ:
		si_code = FPE_FLTDIV;
		sig = SIGFPE;
		break;
	case VEC_FPUNDER:
		si_code = FPE_FLTUND;
		sig = SIGFPE;
		break;
	case VEC_FPOVER:
		si_code = FPE_FLTOVF;
		sig = SIGFPE;
		break;
	case VEC_ZERODIV:
		si_code = FPE_INTDIV;
		sig = SIGFPE;
		break;
	case VEC_CHK:
	case VEC_TRAP:
		si_code = FPE_INTOVF;
		sig = SIGFPE;
		break;
	case VEC_TRACE:		/* ptrace single step */
		si_code = TRAP_TRACE;
		sig = SIGTRAP;
		break;
	case VEC_TRAP15:	/* breakpoint */
		si_code = TRAP_BRKPT;
		sig = SIGTRAP;
		break;
	default:
		si_code = ILL_ILLOPC;
		sig = SIGILL;
		break;
	}

	/*
	 * The faulting address lives in a format-dependent slot of the
	 * exception frame; fall back to the PC for other formats.
	 */
	switch (fp->ptregs.format) {
	default:
		addr = (void __user *) fp->ptregs.pc;
		break;
	case 2:
		addr = (void __user *) fp->un.fmt2.iaddr;
		break;
	case 7:
		addr = (void __user *) fp->un.fmt7.effaddr;
		break;
	case 9:
		addr = (void __user *) fp->un.fmt9.iaddr;
		break;
	case 10:
		addr = (void __user *) fp->un.fmta.daddr;
		break;
	case 11:
		addr = (void __user*) fp->un.fmtb.daddr;
		break;
	}
	force_sig_fault(sig, si_code, addr);
}
  1019. void die_if_kernel (char *str, struct pt_regs *fp, int nr)
  1020. {
  1021. if (!(fp->sr & PS_S))
  1022. return;
  1023. console_verbose();
  1024. pr_crit("%s: %08x\n", str, nr);
  1025. show_registers(fp);
  1026. add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
  1027. make_task_dead(SIGSEGV);
  1028. }
/*
 * Record the supervisor stack pointer for the current task; show_stack()
 * reads thread.esp0 when asked to dump a task's stack.
 */
asmlinkage void set_esp0(unsigned long ssp)
{
	current->thread.esp0 = ssp;
}
  1033. /*
  1034. * This function is called if an error occur while accessing
  1035. * user-space from the fpsp040 code.
  1036. */
  1037. asmlinkage void fpsp040_die(void)
  1038. {
  1039. force_exit_sig(SIGSEGV);
  1040. }
  1041. #ifdef CONFIG_M68KFPU_EMU
/*
 * Deliver a fault signal on behalf of the in-kernel FPU emulator
 * (CONFIG_M68KFPU_EMU): forwards the emulator-detected condition to
 * the current task via force_sig_fault().
 */
asmlinkage void fpemu_signal(int signal, int code, void *addr)
{
	force_sig_fault(signal, code, addr);
}
  1046. #endif