alternative.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the ARM64 and x86 version
 *
 * Copyright (C) 2021 Sifive.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>
#include <asm/insn.h>
#include <asm/patch.h>

struct cpu_manufacturer_info_t {
	unsigned long vendor_id;
	unsigned long arch_id;
	unsigned long imp_id;
	void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
			   unsigned long archid, unsigned long impid,
			   unsigned int stage);
};
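
/*
 * Probe the IDs that identify this core's vendor and pick the matching
 * vendor errata patch function, if any. In M-mode the IDs are read
 * directly from the machine-level CSRs; otherwise they are obtained
 * from the SBI implementation.
 */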
static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
{
#ifdef CONFIG_RISCV_M_MODE
	cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
	cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
	cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
#else
	cpu_mfr_info->vendor_id = sbi_get_mvendorid();
	cpu_mfr_info->arch_id = sbi_get_marchid();
	cpu_mfr_info->imp_id = sbi_get_mimpid();
#endif

	switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_ANDES
	case ANDES_VENDOR_ID:
		cpu_mfr_info->patch_func = andes_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_SIFIVE
	case SIFIVE_VENDOR_ID:
		cpu_mfr_info->patch_func = sifive_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_THEAD
	case THEAD_VENDOR_ID:
		cpu_mfr_info->patch_func = thead_errata_patch_func;
		break;
#endif
	default:
		cpu_mfr_info->patch_func = NULL;
	}
}
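
/*
 * Instructions are stored as a stream of 16-bit little-endian parcels,
 * and with the C extension a 32-bit instruction may be only 2-byte
 * aligned, so assemble it from two halfword loads instead of a single
 * (potentially misaligned) word load.
 */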
static u32 riscv_instruction_at(void *p)
{
	u16 *parcel = p;

	return (u32)parcel[0] | (u32)parcel[1] << 16;
}
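
/*
 * An auipc + jalr pair encodes a PC-relative call. When alternative
 * code is moved to a different address, retarget the pair by folding
 * the move distance (patch_offset) into its combined immediate.
 */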
static void riscv_alternative_fix_auipc_jalr(void *ptr, u32 auipc_insn,
					     u32 jalr_insn, int patch_offset)
{
	u32 call[2] = { auipc_insn, jalr_insn };
	s32 imm;

	/* get and adjust new target address */
	imm = riscv_insn_extract_utype_itype_imm(auipc_insn, jalr_insn);
	imm -= patch_offset;

	/* update instructions */
	riscv_insn_insert_utype_itype_imm(&call[0], &call[1], imm);

	/* patch the call site again */
	patch_text_nosync(ptr, call, sizeof(u32) * 2);
}
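
/*
 * Same idea for a single PC-relative jal: rebias its J-type immediate
 * by the distance the code was moved.
 */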
static void riscv_alternative_fix_jal(void *ptr, u32 jal_insn, int patch_offset)
{
	s32 imm;

	/* get and adjust new target address */
	imm = riscv_insn_extract_jtype_imm(jal_insn);
	imm -= patch_offset;

	/* update instruction */
	riscv_insn_insert_jtype_imm(&jal_insn, imm);

	/* patch the jump site again */
	patch_text_nosync(ptr, &jal_insn, sizeof(u32));
}
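
/*
 * Walk the patched range and fix up every PC-relative auipc + jalr
 * call, and every jal whose target lies outside the range, since the
 * code now runs at a different address than it was assembled for.
 */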
void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
				   int patch_offset)
{
	int num_insn = len / sizeof(u32);
	int i;

	for (i = 0; i < num_insn; i++) {
		u32 insn = riscv_instruction_at(alt_ptr + i * sizeof(u32));

		/*
		 * This may be the start of an auipc + jalr pair, so
		 * check that at least one more instruction follows
		 * before reading it.
		 */
		if (riscv_insn_is_auipc(insn) && i < num_insn - 1) {
			u32 insn2 = riscv_instruction_at(alt_ptr + (i + 1) * sizeof(u32));

			if (!riscv_insn_is_jalr(insn2))
				continue;

			/* if the instruction pair is a call, it will use the ra register */
			if (RV_EXTRACT_RD_REG(insn) != 1)
				continue;

			riscv_alternative_fix_auipc_jalr(alt_ptr + i * sizeof(u32),
							 insn, insn2, patch_offset);
			i++;
		}

		if (riscv_insn_is_jal(insn)) {
			s32 imm = riscv_insn_extract_jtype_imm(insn);

			/* Don't modify jumps inside the alternative block */
			if ((alt_ptr + i * sizeof(u32) + imm) >= alt_ptr &&
			    (alt_ptr + i * sizeof(u32) + imm) < (alt_ptr + len))
				continue;

			riscv_alternative_fix_jal(alt_ptr + i * sizeof(u32),
						  insn, patch_offset);
		}
	}
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
static void __init_or_module _apply_alternatives(struct alt_entry *begin,
						 struct alt_entry *end,
						 unsigned int stage)
{
	struct cpu_manufacturer_info_t cpu_mfr_info;

	riscv_fill_cpu_mfr_info(&cpu_mfr_info);

	riscv_cpufeature_patch_func(begin, end, stage);

	if (!cpu_mfr_info.patch_func)
		return;

	cpu_mfr_info.patch_func(begin, end,
				cpu_mfr_info.arch_id,
				cpu_mfr_info.imp_id,
				stage);
}
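
/*
 * The vDSO image embeds its own .alternative section; locate it via
 * the ELF section headers and patch it at boot. On !MMU configurations
 * there is no vDSO to patch, so this is a no-op.
 */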
#ifdef CONFIG_MMU
static void __init apply_vdso_alternatives(void)
{
	const Elf_Ehdr *hdr;
	const Elf_Shdr *shdr;
	const Elf_Shdr *alt;
	struct alt_entry *begin, *end;

	hdr = (Elf_Ehdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".alternative");
	if (!alt)
		return;

	begin = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;

	_apply_alternatives((struct alt_entry *)begin,
			    (struct alt_entry *)end,
			    RISCV_ALTERNATIVES_BOOT);
}
#else
static void __init apply_vdso_alternatives(void) { }
#endif
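
/*
 * Patch the core kernel's alternatives (and the vDSO's) once, on the
 * boot CPU, before the other CPUs are brought up.
 */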
void __init apply_boot_alternatives(void)
{
	/* If called on non-boot cpu things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_BOOT);

	apply_vdso_alternatives();
}

/*
 * apply_early_boot_alternatives() is called from setup_vm() with the
 * MMU off.
 *
 * The following requirements should be honoured for it to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for alternative.o in kernel/Makefile.
 */
void __init apply_early_boot_alternatives(void)
{
#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_EARLY_BOOT);
#endif
}

#ifdef CONFIG_MODULES
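/*
 * Called when a module is loaded to apply alternatives to the module's
 * own .alternative section.
 */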
void apply_module_alternatives(void *start, size_t length)
{
	_apply_alternatives((struct alt_entry *)start,
			    (struct alt_entry *)(start + length),
			    RISCV_ALTERNATIVES_MODULE);
}
#endif