// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>

#include "access-helper.h"
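
/*
 * Default dispatch table for non-interrupt exceptions, indexed by the
 * ECode field of CSR.ESTAT: every slot starts as handle_reserved and the
 * known exception codes are overridden below. trap_init() copies these
 * handlers into the vectored exception entry area.
 */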
void *exception_table[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1] = handle_reserved,

	[EXCCODE_TLBI]		= handle_tlb_load,
	[EXCCODE_TLBL]		= handle_tlb_load,
	[EXCCODE_TLBS]		= handle_tlb_store,
	[EXCCODE_TLBM]		= handle_tlb_modify,
	[EXCCODE_TLBNR]		= handle_tlb_protect,
	[EXCCODE_TLBNX]		= handle_tlb_protect,
	[EXCCODE_TLBPE]		= handle_tlb_protect,
	[EXCCODE_ADE]		= handle_ade,
	[EXCCODE_ALE]		= handle_ale,
	[EXCCODE_BCE]		= handle_bce,
	[EXCCODE_SYS]		= handle_sys,
	[EXCCODE_BP]		= handle_bp,
	[EXCCODE_INE]		= handle_ri,
	[EXCCODE_IPE]		= handle_ri,
	[EXCCODE_FPDIS]		= handle_fpu,
	[EXCCODE_LSXDIS]	= handle_lsx,
	[EXCCODE_LASXDIS]	= handle_lasx,
	[EXCCODE_FPE]		= handle_fpe,
	[EXCCODE_WATCH]		= handle_watch,
	[EXCCODE_BTDIS]		= handle_lbt,
};
EXPORT_SYMBOL_GPL(exception_table);

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s        ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}
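
/*
 * show_stack() may be handed a bare stack pointer with no register file,
 * so it synthesizes a minimal pt_regs: $sp, plus the saved $ra/ERA and
 * frame pointer when the target is a sleeping task, which is enough for
 * the unwinder to make progress (csr_crmd is zeroed, not meaningful here).
 */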
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
	/* e.g. "+PG", "-DA" */
	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}

static void print_plv_fragment(const char *key, int val)
{
	/* e.g. "PLV0", "PPLV3" */
	pr_cont("%s%d", key, val);
}

static void print_memory_type_fragment(const char *key, unsigned long val)
{
	const char *humanized_type;

	switch (val) {
	case 0:
		humanized_type = "SUC";
		break;
	case 1:
		humanized_type = "CC";
		break;
	case 2:
		humanized_type = "WUC";
		break;
	default:
		pr_cont(" %s=Reserved(%lu)", key, val);
		return;
	}

	/* e.g. " DATM=WUC" */
	pr_cont(" %s=%s", key, humanized_type);
}

static void print_intr_fragment(const char *key, unsigned long val)
{
	/* e.g. "LIE=0-1,3,5-7" */
	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}

static void print_crmd(unsigned long x)
{
	printk(" CRMD: %08lx (", x);
	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
	pr_cont(")\n");
}

static void print_prmd(unsigned long x)
{
	printk(" PRMD: %08lx (", x);
	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
	pr_cont(")\n");
}

static void print_euen(unsigned long x)
{
	printk(" EUEN: %08lx (", x);
	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
	pr_cont(")\n");
}

static void print_ecfg(unsigned long x)
{
	printk(" ECFG: %08lx (", x);
	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}

static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
	/*
	 * LoongArch users and developers are probably more familiar with
	 * the names found in the ISA manual, so we are going to print out
	 * the latter. This will require some mapping.
	 */
	switch (ecode) {
	case EXCCODE_RSV: return "INT";
	case EXCCODE_TLBL: return "PIL";
	case EXCCODE_TLBS: return "PIS";
	case EXCCODE_TLBI: return "PIF";
	case EXCCODE_TLBM: return "PME";
	case EXCCODE_TLBNR: return "PNR";
	case EXCCODE_TLBNX: return "PNX";
	case EXCCODE_TLBPE: return "PPI";
	case EXCCODE_ADE:
		switch (esubcode) {
		case EXSUBCODE_ADEF: return "ADEF";
		case EXSUBCODE_ADEM: return "ADEM";
		}
		break;
	case EXCCODE_ALE: return "ALE";
	case EXCCODE_BCE: return "BCE";
	case EXCCODE_SYS: return "SYS";
	case EXCCODE_BP: return "BRK";
	case EXCCODE_INE: return "INE";
	case EXCCODE_IPE: return "IPE";
	case EXCCODE_FPDIS: return "FPD";
	case EXCCODE_LSXDIS: return "SXD";
	case EXCCODE_LASXDIS: return "ASXD";
	case EXCCODE_FPE:
		switch (esubcode) {
		case EXCSUBCODE_FPE: return "FPE";
		case EXCSUBCODE_VFPE: return "VFPE";
		}
		break;
	case EXCCODE_WATCH:
		switch (esubcode) {
		case EXCSUBCODE_WPEF: return "WPEF";
		case EXCSUBCODE_WPEM: return "WPEM";
		}
		break;
	case EXCCODE_BTDIS: return "BTD";
	case EXCCODE_BTE: return "BTE";
	case EXCCODE_GSPR: return "GSPR";
	case EXCCODE_HVC: return "HVC";
	case EXCCODE_GCM:
		switch (esubcode) {
		case EXCSUBCODE_GCSC: return "GCSC";
		case EXCSUBCODE_GCHC: return "GCHC";
		}
		break;
	/*
	 * The manual does not mention the EXCCODE_SE case, but print it out
	 * nevertheless.
	 */
	case EXCCODE_SE: return "SE";
	}

	return "???";
}

static void print_estat(unsigned long x)
{
	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);

	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);

	show_regs_print_info(KERN_DEFAULT);

	/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));

	/* The slot for $zero is reused as the syscall restart flag */
	if (regs->regs[0])
		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));

	if (user_mode(regs)) {
		printk("   ra: %0*lx\n", GPR_FIELD(1));
		printk("  ERA: %0*lx\n", field, regs->csr_era);
	} else {
		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
	}
#undef GPR_FIELD

	/* Print saved important CSRs */
	print_crmd(regs->csr_crmd);
	print_prmd(regs->csr_prmd);
	print_euen(regs->csr_euen);
	print_ecfg(regs->csr_ecfg);
	print_estat(regs->csr_estat);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);

	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string(), cpu_full_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs)
{
	int ret;
	static int die_counter;

	oops_enter();

	ret = notify_die(DIE_OOPS, str, regs, 0,
			 current->thread.trap_nr, SIGSEGV);

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (ret == NOTIFY_STOP)
		return;

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(SIGSEGV);
}

static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size/4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d is not supported yet", vs);

	csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}
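
/*
 * A worked example of the VS encoding above (illustrative only, assuming
 * the 512-byte VECSIZE this file passes in from per_cpu_trap_init()):
 * each vector spans 2^VS instructions of 4 bytes each, so a 512-byte
 * vector needs vs = ilog2(512 / 4) = 7, the largest value CSR.ECFG.VS
 * accepts.
 */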

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits. This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
static void force_fcsr_sig(unsigned long fcsr,
			void __user *fault_addr, struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}
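
/*
 * Convention for process_fpemu_return() above: it returns 0 when sig == 0
 * (nothing to deliver), otherwise it forces the matching fault signal on
 * the current task and returns 1.
 */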

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required. */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}
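
/*
 * With CONFIG_ARCH_STRICT_ALIGN the hardware has no unaligned-access
 * support, so do_ale() below emulates the faulting load/store in software
 * (emulate_load_store_insn()), gated by the two sysctl knobs that follow;
 * without it, an ALE is simply fatal in the kernel and SIGBUS in userspace.
 */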

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_ARCH_STRICT_ALIGN
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
	unsigned int *pc;

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->csr_badvaddr == regs->csr_era)
		goto sigbus;
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (!unaligned_enabled)
		goto sigbus;
	if (!no_unaligned_warning)
		show_registers(regs);

	pc = (unsigned int *)exception_era(regs);

	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);

	goto out;

sigbus:
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();
#endif
	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}
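
/*
 * Background for the decoder in do_bce() below: LoongArch bounds-check
 * traps come either from the explicit assertions ASRTLE/ASRTGT (rd must
 * be 0) or from the bound-checked load/store families ld/st{le,gt} and
 * fld/fst{le,gt}, all of which keep the checked address in rj and the
 * bound in rk. The "le" forms supply an upper bound and the "gt" forms a
 * lower bound; whichever bound is absent stays at its ULONG_MAX/0 default.
 */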
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}
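
/*
 * Note on the bcode extraction in do_bp() below: the BREAK instruction
 * carries a 15-bit immediate code, so masking the fetched word with
 * 0x7fff recovers it. Well-known codes (BRK_KDB, BRK_KPROBE_*,
 * BRK_UPROBE_*, BRK_BUG, BRK_DIVZERO, BRK_OVERFLOW) are dispatched to
 * their dedicated handlers; anything else becomes a plain SIGTRAP.
 */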
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * Notify the kprobe handlers, if the instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KDB:
		if (kgdb_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (uprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (uprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	default:
		current->thread.trap_nr = read_csr_excode();
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_HAVE_HW_BREAKPOINT
	pr_warn("Hardware watch point handler not implemented!\n");
#else
	if (kgdb_breakpoint_handler(regs))
		goto out;

	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
		unsigned long pc = instruction_pointer(regs);
		union loongarch_instruction *ip = (union loongarch_instruction *)pc;

		if (llbit) {
			/*
			 * When the ll-sc combo is encountered, it is regarded as a
			 * single instruction, so don't clear llbit and don't reset
			 * CSR.FWPS.Skip until the ll-sc execution is completed.
			 */
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
			goto out;
		}

		if (pc == current->thread.single_step) {
			/*
			 * Certain insns are occasionally not skipped when
			 * CSR.FWPS.Skip is set, such as fld.d/fst.d, so the
			 * single-step logic needs to compare csr_era against
			 * the value recorded when the step was last armed.
			 */
			if (!is_self_loop_ins(ip, regs)) {
				/*
				 * Check whether the target pc of the given instruction
				 * equals the current pc. If so (a self-loop), we must
				 * not set the CSR.FWPS.Skip bit, as that would break
				 * out of the original instruction stream.
				 */
				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
				goto out;
			}
		}
	} else {
		breakpoint_handler(regs);
		watchpoint_handler(regs);
	}

	force_sig(SIGTRAP);
out:
#endif
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int __maybe_unused opcode;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}
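
/*
 * The init_restore_*() helpers below implement lazy FP/SIMD context
 * handling: the FPU/LSX/LASX-disabled exceptions (do_fpu/do_lsx/do_lasx)
 * fire on first use, at which point the handler either initializes a
 * fresh context (first-time user) or re-acquires ownership and restores
 * the saved registers, widening from FP to LSX to LASX as needed.
 */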
static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

static void init_restore_lsx(void)
{
	enable_lsx();

	if (!thread_lsx_context_live()) {
		/* First time LSX context user */
		init_restore_fp();
		init_lsx_upper();
		set_thread_flag(TIF_LSX_CTX_LIVE);
	} else {
		if (!is_simd_owner()) {
			if (is_fpu_owner()) {
				restore_lsx_upper(current);
			} else {
				__own_fpu();
				restore_lsx(current);
			}
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
}

static void init_restore_lasx(void)
{
	enable_lasx();

	if (!thread_lasx_context_live()) {
		/* First time LASX context user */
		init_restore_lsx();
		init_lasx_upper();
		set_thread_flag(TIF_LASX_CTX_LIVE);
	} else {
		if (is_fpu_owner() || is_simd_owner()) {
			init_restore_lsx();
			restore_lasx_upper(current);
		} else {
			__own_fpu();
			enable_lsx();
			restore_lasx(current);
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
	BUG_ON(!is_lasx_enabled());
}

asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);
	BUG_ON(is_lsx_enabled());
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lsx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lsx invoked from kernel context!", regs);
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_lsx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lasx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lasx invoked from kernel context!", regs);

	preempt_disable();
	init_restore_lasx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}
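
/*
 * LBT (Loongson Binary Translation) keeps its own scratch-register
 * context; like the FP/SIMD paths above, it is enabled lazily on the
 * first BTD (Binary Translation Disable) exception.
 */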
static void init_restore_lbt(void)
{
	if (!thread_lbt_context_live()) {
		/* First time LBT context user */
		init_lbt();
		set_thread_flag(TIF_LBT_CTX_LIVE);
	} else {
		if (!is_lbt_owner())
			own_lbt_inatomic(1);
	}

	BUG_ON(!is_lbt_enabled());
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	/*
	 * BTD (Binary Translation Disable exception) can be triggered
	 * during FP save/restore if TM (Top Mode) is on, which may
	 * cause irq_enable during 'switch_to'. To avoid this situation
	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
	 * will not trigger the BTE), we need to check PRMD first.
	 */
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (!cpu_has_lbt) {
		force_sig(SIGILL);
		goto out;
	}
	BUG_ON(is_lbt_enabled());

	preempt_disable();
	init_restore_lbt();
	preempt_enable();

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
	       read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}
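
/*
 * do_vint() below runs interrupts on a dedicated per-CPU IRQ stack: if
 * the vectored interrupt arrived while already on that stack it simply
 * dispatches, otherwise it stashes the task's $sp at the top of the IRQ
 * stack (for the unwinder) and switches $sp by hand before calling
 * handle_loongarch_irq(), since no C calling convention covers a stack
 * switch.
 */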
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}
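
/*
 * Layout implied above: exception_handlers holds 128 slots of VECSIZE
 * bytes each, 64KB-aligned. The normal exception entry and the
 * machine-error entry share the same base, while the TLB refill entry
 * sits at slot 80. Slots 0-63 receive the exception handlers installed
 * at boot, and the interrupt vectors start at EXCCODE_INT_START within
 * the same table (see per_cpu_trap_init() and trap_init() below).
 */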
void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	/* Set exception vector handler */
	for (i = EXCCODE_ADE; i <= EXCCODE_BTDIS; i++)
		set_handler(i * VECSIZE, exception_table[i], VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}