// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/module.c
 *
 *  Copyright (C) 2002 Russell King.
 *  Modified for nommu by Hyok S. Choi
 *
 *  Module allocation method suggested by Andi Kleen.
 */
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/fs.h>
#include <linux/string.h>

#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
#include <asm/opcodes.h>
  21. bool module_init_section(const char *name)
  22. {
  23. return strstarts(name, ".init") ||
  24. strstarts(name, ".ARM.extab.init") ||
  25. strstarts(name, ".ARM.exidx.init");
  26. }
  27. bool module_exit_section(const char *name)
  28. {
  29. return strstarts(name, ".exit") ||
  30. strstarts(name, ".ARM.extab.exit") ||
  31. strstarts(name, ".ARM.exidx.exit");
  32. }
#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
/*
 * This implements the partitioning algorithm for group relocations as
 * documented in the ARM AArch32 ELF psABI (IHI 0044).
 *
 * A single PC-relative symbol reference is divided in up to 3 add or subtract
 * operations, where the final one could be incorporated into a load/store
 * instruction with immediate offset. E.g.,
 *
 *         ADD     Rd, PC, #...            or      ADD     Rd, PC, #...
 *         ADD     Rd, Rd, #...                    ADD     Rd, Rd, #...
 *         LDR     Rd, [Rd, #...]                  ADD     Rd, Rd, #...
 *
 * The latter has a guaranteed range of only 16 MiB (3x8 == 24 bits), so it is
 * of limited use in the kernel. However, the ADD/ADD/LDR combo has a range of
 * -/+ 256 MiB, (2x8 + 12 == 28 bits), which means it has sufficient range for
 * any in-kernel symbol reference (unless module PLTs are being used).
 *
 * The main advantage of this approach over the typical pattern using a literal
 * load is that literal loads may miss in the D-cache, and generally lead to
 * lower cache efficiency for variables that are referenced often from many
 * different places in the code.
 */
/*
 * get_group_rem() - partition *offset for group relocation 'group'
 * @group:	0-based index of the relocation group to resolve
 * @offset:	in: the full displacement; out: the residual displacement
 *		that remains after the chunks consumed by all preceding
 *		groups have been stripped off
 *
 * Returns the (even) left-alignment shift of the residual's most
 * significant set bit, or 32 if the residual is zero.  The caller uses
 * this as the basis for an ARM modified-immediate rotation.
 */
static u32 get_group_rem(u32 group, u32 *offset)
{
	u32 val = *offset;
	u32 shift;

	do {
		/*
		 * Leading-zero count of 'val', rounded down to an even
		 * value (ARM immediate rotations are multiples of 2);
		 * 32 when no bits remain set.
		 */
		shift = val ? (31 - __fls(val)) & ~1 : 32;
		/* Publish the residual before peeling off the next chunk. */
		*offset = val;
		if (!val)
			break;
		/*
		 * Mask off the most significant 8-bit chunk (the one this
		 * group's instruction will encode), keeping the remainder
		 * for the next group.
		 */
		val &= 0xffffff >> shift;
	} while (group--);
	return shift;
}
#endif
/*
 * apply_relocate() - resolve the REL-type relocations of one section
 * @sechdrs:	section header array of the module image
 * @strtab:	symbol name string table
 * @symindex:	index of the symbol table section within @sechdrs
 * @relindex:	index of the relocation section within @sechdrs
 * @module:	module being loaded (used for PLTs and error reports)
 *
 * Patches the section named by the relocation section's sh_info in
 * place, one Elf32_Rel entry at a time.  Returns 0 on success, or
 * -ENOEXEC when an entry is malformed, out of range, or of an
 * unsupported type.
 */
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
	       unsigned int relindex, struct module *module)
{
	Elf32_Shdr *symsec = sechdrs + symindex;
	Elf32_Shdr *relsec = sechdrs + relindex;
	Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
	Elf32_Rel *rel = (void *)relsec->sh_addr;
	unsigned int i;

	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
		unsigned long loc;
		Elf32_Sym *sym;
		const char *symname;
#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
		u32 shift, group = 1;
#endif
		s32 offset;
		u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
		u32 upper, lower, sign, j1, j2;
#endif

		/* Symbol table index encoded in the relocation entry. */
		offset = ELF32_R_SYM(rel->r_info);
		/*
		 * NOTE(review): '>' accepts offset == entry count, which
		 * would index one past the symbol table — looks like an
		 * off-by-one; confirm intended bound before changing.
		 */
		if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
			pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
			       module->name, relindex, i);
			return -ENOEXEC;
		}

		sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
		symname = strtab + sym->st_name;

		/*
		 * The patched word must lie entirely inside the target
		 * section.  NOTE(review): r_offset is unsigned, so the
		 * '< 0' test is always false, and sh_size < 4 would make
		 * 'sh_size - sizeof(u32)' wrap — verify against upstream.
		 */
		if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
			pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
			       module->name, relindex, i, symname,
			       rel->r_offset, dstsec->sh_size);
			return -ENOEXEC;
		}

		/* Address of the instruction/word being relocated. */
		loc = dstsec->sh_addr + rel->r_offset;

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_ARM_NONE:
			/* ignore */
			break;

		case R_ARM_ABS32:
		case R_ARM_TARGET1:
			/* Absolute 32-bit: S + A (addend is the word in place). */
			*(u32 *)loc += sym->st_value;
			break;

		case R_ARM_PC24:
		case R_ARM_CALL:
		case R_ARM_JUMP24:
			/* ARM-state branch: target must be 4-byte aligned. */
			if (sym->st_value & 3) {
				pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (ARM -> Thumb)\n",
				       module->name, relindex, i, symname);
				return -ENOEXEC;
			}

			/* Extract the signed 26-bit addend from the opcode. */
			offset = __mem_to_opcode_arm(*(u32 *)loc);
			offset = (offset & 0x00ffffff) << 2;
			offset = sign_extend32(offset, 25);

			offset += sym->st_value - loc;

			/*
			 * Route through a PLT entry if 'offset' exceeds the
			 * supported range. Note that 'offset + loc + 8'
			 * contains the absolute jump target, i.e.,
			 * @sym + addend, corrected for the +8 PC bias.
			 */
			if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
			    (offset <= (s32)0xfe000000 ||
			     offset >= (s32)0x02000000))
				offset = get_module_plt(module, loc,
							offset + loc + 8)
					 - loc - 8;

			if (offset <= (s32)0xfe000000 ||
			    offset >= (s32)0x02000000) {
				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
				       module->name, relindex, i, symname,
				       ELF32_R_TYPE(rel->r_info), loc,
				       sym->st_value);
				return -ENOEXEC;
			}

			/* Re-encode the 24-bit word-offset field. */
			offset >>= 2;
			offset &= 0x00ffffff;

			*(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
			*(u32 *)loc |= __opcode_to_mem_arm(offset);
			break;

		case R_ARM_V4BX:
			/* Preserve Rm and the condition code. Alter
			 * other bits to re-code instruction as
			 * MOV PC,Rm.
			 */
			*(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
			*(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
			break;

		case R_ARM_PREL31:
			/* 31-bit PC-relative (used by unwind tables). */
			offset = (*(s32 *)loc << 1) >> 1; /* sign extend */
			offset += sym->st_value - loc;
			if (offset >= 0x40000000 || offset < -0x40000000) {
				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
				       module->name, relindex, i, symname,
				       ELF32_R_TYPE(rel->r_info), loc,
				       sym->st_value);
				return -ENOEXEC;
			}
			*(u32 *)loc &= 0x80000000;
			*(u32 *)loc |= offset & 0x7fffffff;
			break;

		case R_ARM_MOVW_ABS_NC:
		case R_ARM_MOVT_ABS:
		case R_ARM_MOVW_PREL_NC:
		case R_ARM_MOVT_PREL:
			/* ARM MOVW/MOVT: imm16 split across imm4:imm12. */
			offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
			offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
			offset = sign_extend32(offset, 15);

			offset += sym->st_value;
			if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL ||
			    ELF32_R_TYPE(rel->r_info) == R_ARM_MOVW_PREL_NC)
				offset -= loc;
			/* MOVT variants encode the high half-word. */
			if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS ||
			    ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL)
				offset >>= 16;

			tmp &= 0xfff0f000;
			tmp |= ((offset & 0xf000) << 4) |
			       (offset & 0x0fff);

			*(u32 *)loc = __opcode_to_mem_arm(tmp);
			break;

#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
		case R_ARM_ALU_PC_G0_NC:
			group = 0;
			fallthrough;
		case R_ARM_ALU_PC_G1_NC:
			/*
			 * Decode the existing ADD/SUB modified-immediate
			 * addend: 8-bit value rotated right by 2 * rot.
			 */
			tmp = __mem_to_opcode_arm(*(u32 *)loc);
			offset = ror32(tmp & 0xff, (tmp & 0xf00) >> 7);
			if (tmp & BIT(22))
				offset = -offset;
			offset += sym->st_value - loc;
			if (offset < 0) {
				offset = -offset;
				tmp = (tmp & ~BIT(23)) | BIT(22); // SUB opcode
			} else {
				tmp = (tmp & ~BIT(22)) | BIT(23); // ADD opcode
			}

			/* Take this group's 8-bit chunk of the displacement. */
			shift = get_group_rem(group, &offset);
			if (shift < 24) {
				offset >>= 24 - shift;
				offset |= (shift + 8) << 7;
			}
			*(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
			break;

		case R_ARM_LDR_PC_G2:
			/* Decode the 12-bit LDR immediate and its U (sign) bit. */
			tmp = __mem_to_opcode_arm(*(u32 *)loc);
			offset = tmp & 0xfff;
			if (~tmp & BIT(23))		// U bit cleared?
				offset = -offset;
			offset += sym->st_value - loc;
			if (offset < 0) {
				offset = -offset;
				tmp &= ~BIT(23);	// clear U bit
			} else {
				tmp |= BIT(23);		// set U bit
			}

			/* Groups 0 and 1 were consumed by preceding ALU relocs. */
			get_group_rem(2, &offset);

			if (offset > 0xfff) {
				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
				       module->name, relindex, i, symname,
				       ELF32_R_TYPE(rel->r_info), loc,
				       sym->st_value);
				return -ENOEXEC;
			}
			*(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
			break;
#endif
#ifdef CONFIG_THUMB2_KERNEL
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			/*
			 * For function symbols, only Thumb addresses are
			 * allowed (no interworking).
			 *
			 * For non-function symbols, the destination
			 * has no specific ARM/Thumb disposition, so
			 * the branch is resolved under the assumption
			 * that interworking is not required.
			 */
			if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
			    !(sym->st_value & 1)) {
				pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (Thumb -> ARM)\n",
				       module->name, relindex, i, symname);
				return -ENOEXEC;
			}

			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));

			/*
			 * 25 bit signed address range (Thumb-2 BL and B.W
			 * instructions):
			 *   S:I1:I2:imm10:imm11:0
			 * where:
			 *   S     = upper[10]   = offset[24]
			 *   I1    = ~(J1 ^ S)   = offset[23]
			 *   I2    = ~(J2 ^ S)   = offset[22]
			 *   imm10 = upper[9:0]  = offset[21:12]
			 *   imm11 = lower[10:0] = offset[11:1]
			 *   J1    = lower[13]
			 *   J2    = lower[11]
			 */
			sign = (upper >> 10) & 1;
			j1 = (lower >> 13) & 1;
			j2 = (lower >> 11) & 1;
			offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
				((~(j2 ^ sign) & 1) << 22) |
				((upper & 0x03ff) << 12) |
				((lower & 0x07ff) << 1);
			offset = sign_extend32(offset, 24);
			offset += sym->st_value - loc;

			/*
			 * Route through a PLT entry if 'offset' exceeds the
			 * supported range.
			 */
			if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
			    (offset <= (s32)0xff000000 ||
			     offset >= (s32)0x01000000))
				offset = get_module_plt(module, loc,
							offset + loc + 4)
					 - loc - 4;

			if (offset <= (s32)0xff000000 ||
			    offset >= (s32)0x01000000) {
				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
				       module->name, relindex, i, symname,
				       ELF32_R_TYPE(rel->r_info), loc,
				       sym->st_value);
				return -ENOEXEC;
			}

			/* Re-encode the branch with the new displacement. */
			sign = (offset >> 24) & 1;
			j1 = sign ^ (~(offset >> 23) & 1);
			j2 = sign ^ (~(offset >> 22) & 1);
			upper = (u16)((upper & 0xf800) | (sign << 10) |
					    ((offset >> 12) & 0x03ff));
			lower = (u16)((lower & 0xd000) |
				      (j1 << 13) | (j2 << 11) |
				      ((offset >> 1) & 0x07ff));

			*(u16 *)loc = __opcode_to_mem_thumb16(upper);
			*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
			break;

		case R_ARM_THM_MOVW_ABS_NC:
		case R_ARM_THM_MOVT_ABS:
		case R_ARM_THM_MOVW_PREL_NC:
		case R_ARM_THM_MOVT_PREL:
			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));

			/*
			 * MOVT/MOVW instructions encoding in Thumb-2:
			 *
			 * i	= upper[10]
			 * imm4	= upper[3:0]
			 * imm3	= lower[14:12]
			 * imm8	= lower[7:0]
			 *
			 * imm16 = imm4:i:imm3:imm8
			 */
			offset = ((upper & 0x000f) << 12) |
				((upper & 0x0400) << 1) |
				((lower & 0x7000) >> 4) | (lower & 0x00ff);
			offset = sign_extend32(offset, 15);
			offset += sym->st_value;

			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL ||
			    ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVW_PREL_NC)
				offset -= loc;
			/* MOVT variants encode the high half-word. */
			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS ||
			    ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL)
				offset >>= 16;

			upper = (u16)((upper & 0xfbf0) |
				      ((offset & 0xf000) >> 12) |
				      ((offset & 0x0800) >> 1));
			lower = (u16)((lower & 0x8f00) |
				      ((offset & 0x0700) << 4) |
				      (offset & 0x00ff));
			*(u16 *)loc = __opcode_to_mem_thumb16(upper);
			*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
			break;
#endif

		default:
			pr_err("%s: unknown relocation: %u\n",
			       module->name, ELF32_R_TYPE(rel->r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
  356. static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
  357. const Elf_Shdr *sechdrs, const char *name)
  358. {
  359. const Elf_Shdr *s, *se;
  360. const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
  361. for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
  362. if (strcmp(name, secstrs + s->sh_name) == 0)
  363. return s;
  364. return NULL;
  365. }
extern void fixup_pv_table(const void *, unsigned long);
extern void fixup_smp(const void *, unsigned long);
/*
 * module_finalize() - arch-specific post-relocation fixups
 * @hdr:	in-memory ELF header of the module image
 * @sechdrs:	section header array
 * @mod:	module being loaded
 *
 * Registers the module's ARM unwind tables, patches the phys/virt
 * translation table, and applies (or rejects) SMP alternatives.
 * Returns 0 on success, -EINVAL when the module carries
 * ".alt.smp.init" fixups on a UP system without CONFIG_SMP_ON_UP.
 */
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *mod)
{
	const Elf_Shdr *s = NULL;
#ifdef CONFIG_ARM_UNWIND
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
	struct list_head *unwind_list = &mod->arch.unwind_list;

	INIT_LIST_HEAD(unwind_list);
	mod->arch.init_table = NULL;

	/* Pair each allocated unwind-index section with its text section. */
	for (s = sechdrs; s < sechdrs_end; s++) {
		const char *secname = secstrs + s->sh_name;
		const char *txtname;
		const Elf_Shdr *txt_sec;

		if (!(s->sh_flags & SHF_ALLOC) ||
		    s->sh_type != ELF_SECTION_UNWIND)
			continue;

		/* ".ARM.exidx" covers ".text"; ".ARM.exidx<sfx>" covers "<sfx>". */
		if (!strcmp(".ARM.exidx", secname))
			txtname = ".text";
		else
			txtname = secname + strlen(".ARM.exidx");
		txt_sec = find_mod_section(hdr, sechdrs, txtname);

		if (txt_sec) {
			/*
			 * NOTE(review): unwind_table_add()'s return value
			 * is not checked before list_add() — confirm it
			 * cannot fail here.
			 */
			struct unwind_table *table =
				unwind_table_add(s->sh_addr,
						 s->sh_size,
						 txt_sec->sh_addr,
						 txt_sec->sh_size);

			list_add(&table->mod_list, unwind_list);

			/* save init table for module_arch_freeing_init */
			if (strcmp(".ARM.exidx.init.text", secname) == 0)
				mod->arch.init_table = table;
		}
	}
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	s = find_mod_section(hdr, sechdrs, ".pv_table");
	if (s)
		fixup_pv_table((void *)s->sh_addr, s->sh_size);
#endif
	/* SMP alternatives: patch on UP if supported, else refuse to load. */
	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
	if (s && !is_smp())
#ifdef CONFIG_SMP_ON_UP
		fixup_smp((void *)s->sh_addr, s->sh_size);
#else
		return -EINVAL;
#endif
	return 0;
}
  417. void
  418. module_arch_cleanup(struct module *mod)
  419. {
  420. #ifdef CONFIG_ARM_UNWIND
  421. struct unwind_table *tmp;
  422. struct unwind_table *n;
  423. list_for_each_entry_safe(tmp, n,
  424. &mod->arch.unwind_list, mod_list) {
  425. list_del(&tmp->mod_list);
  426. unwind_table_del(tmp);
  427. }
  428. mod->arch.init_table = NULL;
  429. #endif
  430. }
  431. void __weak module_arch_freeing_init(struct module *mod)
  432. {
  433. #ifdef CONFIG_ARM_UNWIND
  434. struct unwind_table *init = mod->arch.init_table;
  435. if (init) {
  436. mod->arch.init_table = NULL;
  437. list_del(&init->mod_list);
  438. unwind_table_del(init);
  439. }
  440. #endif
  441. }