traps_misaligned.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>
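
/*
 * Match/mask pairs for decoding the faulting instruction:
 * (insn & INSN_MASK_<op>) == INSN_MATCH_<op> identifies opcode <op>.
 */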
#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f
#define INSN_MATCH_FLW		0x2007
#define INSN_MASK_FLW		0x707f
#define INSN_MATCH_FLD		0x3007
#define INSN_MASK_FLD		0x707f
#define INSN_MATCH_FLQ		0x4007
#define INSN_MASK_FLQ		0x707f
#define INSN_MATCH_FSW		0x2027
#define INSN_MASK_FSW		0x707f
#define INSN_MATCH_FSD		0x3027
#define INSN_MASK_FSD		0x707f
#define INSN_MATCH_FSQ		0x4027
#define INSN_MASK_FSQ		0x707f
#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003
#define INSN_MATCH_C_FLD	0x2000
#define INSN_MASK_C_FLD		0xe003
#define INSN_MATCH_C_FLW	0x6000
#define INSN_MASK_C_FLW		0xe003
#define INSN_MATCH_C_FSD	0xa000
#define INSN_MASK_C_FSD		0xe003
#define INSN_MATCH_C_FSW	0xe000
#define INSN_MASK_C_FSW		0xe003
#define INSN_MATCH_C_FLDSP	0x2002
#define INSN_MASK_C_FLDSP	0xe003
#define INSN_MATCH_C_FSDSP	0xa002
#define INSN_MASK_C_FSDSP	0xe003
#define INSN_MATCH_C_FLWSP	0x6002
#define INSN_MASK_C_FLWSP	0xe003
#define INSN_MATCH_C_FSWSP	0xe002
#define INSN_MASK_C_FSWSP	0xe003
#define INSN_MATCH_C_LHU	0x8400
#define INSN_MASK_C_LHU		0xfc43
#define INSN_MATCH_C_LH		0x8440
#define INSN_MASK_C_LH		0xfc43
#define INSN_MATCH_C_SH		0x8c00
#define INSN_MASK_C_SH		0xfc43
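
/* An insn whose two low opcode bits are not 0b11 is a 16-bit compressed one. */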
#define INSN_LEN(insn)		((((insn) & 0x3) < 0x3) ? 2 : 4)

#if defined(CONFIG_64BIT)
#define LOG_REGBYTES		3
#define XLEN			64
#else
#define LOG_REGBYTES		2
#define XLEN			32
#endif
#define REGBYTES		(1 << LOG_REGBYTES)
#define XLEN_MINUS_16		((XLEN) - 16)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
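
/*
 * The RVC_*_IMM() macros reassemble the immediate bits that the compressed
 * load/store encodings scatter across the 16-bit instruction word.
 */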
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)
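
/*
 * The xN register slots are laid out contiguously in struct pt_regs, so a
 * register number scaled by REGBYTES is a byte offset into the saved
 * registers; REG_PTR() turns a register field of the insn into a pointer.
 */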
#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
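
/* Sign-extended I-type and S-type immediates. */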
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3		0x7000

#define GET_PRECISION(insn)	(((insn) >> 25) & 3)
#define GET_RM(insn)		(((insn) >> 12) & 7)
#define PRECISION_S		0
#define PRECISION_D		1
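
/*
 * With CONFIG_FPU, the helpers below move data to and from the FP register
 * file through small extern assembly routines, and mark the task's FP state
 * dirty (SR_FS_DIRTY) after touching it.
 */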
#ifdef CONFIG_FPU

#define FP_GET_RD(insn)		(insn >> 7 & 0x1F)

extern void put_f32_reg(unsigned long fp_reg, unsigned long value);

static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
		      unsigned long val)
{
	unsigned long fp_reg = FP_GET_RD(insn);

	put_f32_reg(fp_reg, val);
	regs->status |= SR_FS_DIRTY;

	return 0;
}

extern void put_f64_reg(unsigned long fp_reg, unsigned long value);

static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
{
	unsigned long fp_reg = FP_GET_RD(insn);
	unsigned long value;
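
	/*
	 * On rv32 a u64 does not fit in a single GPR, so the value is handed
	 * to the assembly helper by reference instead of by value.
	 */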
#if __riscv_xlen == 32
	value = (unsigned long) &val;
#else
	value = val;
#endif
	put_f64_reg(fp_reg, value);
	regs->status |= SR_FS_DIRTY;

	return 0;
}

#if __riscv_xlen == 32
extern void get_f64_reg(unsigned long fp_reg, u64 *value);

static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
		      struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	u64 val;

	get_f64_reg(fp_reg, &val);
	regs->status |= SR_FS_DIRTY;

	return val;
}
#else
extern unsigned long get_f64_reg(unsigned long fp_reg);

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f64_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}
#endif

extern unsigned long get_f32_reg(unsigned long fp_reg);

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f32_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#else /* CONFIG_FPU */
static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
		       unsigned long val) {}
static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}
static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}
static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}
#endif

#define GET_F64_RS2(insn, regs)		(get_f64_rs(insn, 20, regs))
#define GET_F64_RS2C(insn, regs)	(get_f64_rs(insn, 2, regs))
#define GET_F64_RS2S(insn, regs)	(get_f64_rs(RVC_RS2S(insn), 0, regs))

#define GET_F32_RS2(insn, regs)		(get_f32_rs(insn, 20, regs))
#define GET_F32_RS2C(insn, regs)	(get_f32_rs(insn, 2, regs))
#define GET_F32_RS2S(insn, regs)	(get_f32_rs(RVC_RS2S(insn), 0, regs))
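
/*
 * Fetch one instruction (half)word at insn_addr: through __get_user() when
 * the trap came from user mode, so faults are handled gracefully, and via a
 * plain dereference for kernel mode.
 */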
#define __read_insn(regs, insn, insn_addr, type)	\
({							\
	int __ret;					\
							\
	if (user_mode(regs)) {				\
		__ret = __get_user(insn, (type __user *) insn_addr);	\
	} else {					\
		insn = *(type *)insn_addr;		\
		__ret = 0;				\
	}						\
							\
	__ret;						\
})
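
/*
 * Read the (possibly compressed) instruction at epc.  When epc is only
 * 2-byte aligned, the instruction is read one halfword at a time; the low
 * bits of the first halfword tell whether a second one is needed.
 */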
static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
{
	ulong insn = 0;

	if (epc & 0x2) {
		ulong tmp = 0;

		if (__read_insn(regs, insn, epc, u16))
			return -EFAULT;
		/*
		 * __get_user() uses a regular "lw", which sign-extends the
		 * loaded value; make sure to clear the higher-order bits in
		 * case we "or" it below with the upper 16-bit half.
		 */
		insn &= GENMASK(15, 0);
		if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		epc += sizeof(u16);
		if (__read_insn(regs, tmp, epc, u16))
			return -EFAULT;
		*r_insn = (tmp << 16) | insn;

		return 0;
	} else {
		if (__read_insn(regs, insn, epc, u32))
			return -EFAULT;
		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		insn &= GENMASK(15, 0);
		*r_insn = insn;

		return 0;
	}
}

union reg_data {
	u8 data_bytes[8];
	ulong data_ulong;
	u64 data_u64;
};

static bool unaligned_ctl __read_mostly;

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
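
/*
 * Emulate a misaligned load: decode the faulting instruction, read the data
 * bytewise (copy_from_user()/memcpy()), write the result to the destination
 * register and step epc past the emulated instruction.
 */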
int handle_misaligned_load(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int fp = 0, shift = 0, len = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
#endif

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
		fp = 1;
		len = 8;
	} else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
		fp = 1;
		len = 4;
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
		fp = 1;
		len = 8;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
		fp = 1;
		len = 8;
#if defined(CONFIG_32BIT)
	} else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
		fp = 1;
		len = 4;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
		fp = 1;
		len = 4;
#endif
	} else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
		len = 2;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else {
		regs->epc = epc;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	val.data_u64 = 0;
	if (user_mode(regs)) {
		if (copy_from_user(&val, (u8 __user *)addr, len))
			return -1;
	} else {
		memcpy(&val, (u8 *)addr, len);
	}
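
	/*
	 * Non-FP loads are sign-extended by shifting the value to the top of
	 * the register and arithmetic-shifting it back down (shift is 0 for
	 * the zero-extending LWU/LHU/C.LHU variants).
	 */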
	if (!fp)
		SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
	else if (len == 8)
		set_f64_rd(insn, regs, val.data_u64);
	else
		set_f32_rd(insn, regs, val.data_ulong);

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}
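
/*
 * Emulate a misaligned store: decode the faulting instruction, fetch the
 * source register value and write it out bytewise via
 * copy_to_user()/memcpy().
 */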
int handle_misaligned_store(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int len = 0, fp = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	val.data_ulong = GET_RS2(insn, regs);

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2(insn, regs);
	} else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2(insn, regs);
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
		len = 8;
		val.data_ulong = GET_RS2C(insn, regs);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
		len = 4;
		val.data_ulong = GET_RS2C(insn, regs);
	} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2C(insn, regs);
#if !defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
	} else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
		len = 2;
		val.data_ulong = GET_RS2S(insn, regs);
	} else {
		regs->epc = epc;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	if (user_mode(regs)) {
		if (copy_to_user((u8 __user *)addr, &val, len))
			return -1;
	} else {
		memcpy((u8 *)addr, &val, len);
	}

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}
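
/*
 * Probe this CPU by performing a deliberately misaligned load: if it traps,
 * handle_misaligned_load() runs and marks this CPU's accesses as emulated.
 */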
void check_unaligned_access_emulated(struct work_struct *work __always_unused)
{
	int cpu = smp_processor_id();
	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
	unsigned long tmp_var, tmp_val;

	*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	__asm__ __volatile__ (
		"	"REG_L" %[tmp], 1(%[ptr])\n"
		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");

	/*
	 * If unaligned_ctl is already set, it means we detected at boot time
	 * that all CPUs use emulated misaligned accesses.  If that changes
	 * when hotplugging a new CPU, this is something we don't handle.
	 */
	if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
		while (true)
			cpu_relax();
	}
}

bool check_unaligned_access_emulated_all_cpus(void)
{
	int cpu;

	/*
	 * We can only support PR_UNALIGN controls if all CPUs have misaligned
	 * accesses emulated since tasks requesting such control can run on
	 * any CPU.
	 */
	schedule_on_each_cpu(check_unaligned_access_emulated);

	for_each_online_cpu(cpu)
		if (per_cpu(misaligned_access_speed, cpu)
		    != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
			return false;

	unaligned_ctl = true;

	return true;
}

bool unaligned_ctl_available(void)
{
	return unaligned_ctl;
}