feature-fixups.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843
  1. /*
  2. * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
  3. *
  4. * Modifications for ppc64:
  5. * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
  6. *
  7. * Copyright 2008 Michael Ellerman, IBM Corporation.
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; either version
  12. * 2 of the License, or (at your option) any later version.
  13. */
  14. #include <linux/types.h>
  15. #include <linux/jump_label.h>
  16. #include <linux/kernel.h>
  17. #include <linux/string.h>
  18. #include <linux/init.h>
  19. #include <linux/sched/mm.h>
  20. #include <linux/stop_machine.h>
  21. #include <asm/cputable.h>
  22. #include <asm/code-patching.h>
  23. #include <asm/page.h>
  24. #include <asm/sections.h>
  25. #include <asm/setup.h>
  26. #include <asm/security_features.h>
  27. #include <asm/firmware.h>
/*
 * One entry in a feature fixup table.  All code locations are stored
 * as byte offsets relative to the address of the entry itself (see
 * calc_addr()), which keeps the tables position independent — a
 * requirement for the VDSO.
 */
struct fixup_entry {
	unsigned long	mask;		/* feature bits this entry tests */
	unsigned long	value;		/* keep original code iff (features & mask) == value */
	long		start_off;	/* offset to start of patchable code */
	long		end_off;	/* offset to end of patchable code */
	long		alt_start_off;	/* offset to start of alternative code */
	long		alt_end_off;	/* offset to end of alternative code */
};
  36. static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
  37. {
  38. /*
  39. * We store the offset to the code as a negative offset from
  40. * the start of the alt_entry, to support the VDSO. This
  41. * routine converts that back into an actual address.
  42. */
  43. return (unsigned int *)((unsigned long)fcur + offset);
  44. }
/*
 * Copy one instruction from the alternative section (*src) to its
 * final location (dest).  A relative branch that targets code outside
 * the alternative section must be re-encoded for its new address; a
 * branch within the section (including to the label one past alt_end)
 * keeps the same relative displacement.  Returns non-zero if the
 * branch could not be translated (e.g. new displacement out of range).
 */
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			instr = translate_branch(dest, src);
			/* translate_branch() returns 0 on failure */
			if (!instr)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}
/*
 * Apply one fixup entry: if the CPU feature word @value does not match
 * the entry's (mask, value) pair, replace the code at [start, end)
 * with the alternative at [alt_start, alt_end), nopping out any
 * leftover space.  Returns non-zero if the alternative is too big for
 * the slot or a branch inside it could not be relocated.
 */
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	/* The alternative must fit in the space it replaces */
	if ((alt_end - alt_start) > (end - start))
		return 1;

	/* Feature matches: the original code stays in place */
	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	/* Copy the alternative in, fixing up external relative branches */
	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	/* Nop out whatever remains of the original section */
	for (; dest < end; dest++)
		raw_patch_instruction(dest, PPC_INST_NOP);

	return 0;
}
  83. void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
  84. {
  85. struct fixup_entry *fcur, *fend;
  86. fcur = fixup_start;
  87. fend = fixup_end;
  88. for (; fcur < fend; fcur++) {
  89. if (patch_feature_section(value, fcur)) {
  90. WARN_ON(1);
  91. printk("Unable to patch feature section at %p - %p" \
  92. " with %p - %p\n",
  93. calc_addr(fcur, fcur->start_off),
  94. calc_addr(fcur, fcur->end_off),
  95. calc_addr(fcur, fcur->alt_start_off),
  96. calc_addr(fcur, fcur->alt_end_off));
  97. }
  98. }
  99. }
  100. #ifdef CONFIG_PPC_BOOK3S_64
  101. void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
  102. {
  103. unsigned int instrs[3], *dest;
  104. long *start, *end;
  105. int i;
  106. start = PTRRELOC(&__start___stf_entry_barrier_fixup),
  107. end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
  108. instrs[0] = 0x60000000; /* nop */
  109. instrs[1] = 0x60000000; /* nop */
  110. instrs[2] = 0x60000000; /* nop */
  111. i = 0;
  112. if (types & STF_BARRIER_FALLBACK) {
  113. instrs[i++] = 0x7d4802a6; /* mflr r10 */
  114. instrs[i++] = 0x60000000; /* branch patched below */
  115. instrs[i++] = 0x7d4803a6; /* mtlr r10 */
  116. } else if (types & STF_BARRIER_EIEIO) {
  117. instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
  118. } else if (types & STF_BARRIER_SYNC_ORI) {
  119. instrs[i++] = 0x7c0004ac; /* hwsync */
  120. instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
  121. instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
  122. }
  123. for (i = 0; start < end; start++, i++) {
  124. dest = (void *)start + *start;
  125. pr_devel("patching dest %lx\n", (unsigned long)dest);
  126. patch_instruction(dest, instrs[0]);
  127. if (types & STF_BARRIER_FALLBACK)
  128. patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
  129. BRANCH_SET_LINK);
  130. else
  131. patch_instruction(dest + 1, instrs[1]);
  132. patch_instruction(dest + 2, instrs[2]);
  133. }
  134. printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
  135. (types == STF_BARRIER_NONE) ? "no" :
  136. (types == STF_BARRIER_FALLBACK) ? "fallback" :
  137. (types == STF_BARRIER_EIEIO) ? "eieio" :
  138. (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
  139. : "unknown");
  140. }
  141. void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
  142. {
  143. unsigned int instrs[6], *dest;
  144. long *start, *end;
  145. int i;
  146. start = PTRRELOC(&__start___stf_exit_barrier_fixup),
  147. end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
  148. instrs[0] = 0x60000000; /* nop */
  149. instrs[1] = 0x60000000; /* nop */
  150. instrs[2] = 0x60000000; /* nop */
  151. instrs[3] = 0x60000000; /* nop */
  152. instrs[4] = 0x60000000; /* nop */
  153. instrs[5] = 0x60000000; /* nop */
  154. i = 0;
  155. if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
  156. if (cpu_has_feature(CPU_FTR_HVMODE)) {
  157. instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
  158. instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
  159. } else {
  160. instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
  161. instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
  162. }
  163. instrs[i++] = 0x7c0004ac; /* hwsync */
  164. instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
  165. instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
  166. if (cpu_has_feature(CPU_FTR_HVMODE)) {
  167. instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
  168. } else {
  169. instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
  170. }
  171. } else if (types & STF_BARRIER_EIEIO) {
  172. instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
  173. }
  174. for (i = 0; start < end; start++, i++) {
  175. dest = (void *)start + *start;
  176. pr_devel("patching dest %lx\n", (unsigned long)dest);
  177. patch_instruction(dest, instrs[0]);
  178. patch_instruction(dest + 1, instrs[1]);
  179. patch_instruction(dest + 2, instrs[2]);
  180. patch_instruction(dest + 3, instrs[3]);
  181. patch_instruction(dest + 4, instrs[4]);
  182. patch_instruction(dest + 5, instrs[5]);
  183. }
  184. printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
  185. (types == STF_BARRIER_NONE) ? "no" :
  186. (types == STF_BARRIER_FALLBACK) ? "fallback" :
  187. (types == STF_BARRIER_EIEIO) ? "eieio" :
  188. (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
  189. : "unknown");
  190. }
  191. static int __do_stf_barrier_fixups(void *data)
  192. {
  193. enum stf_barrier_type *types = data;
  194. do_stf_entry_barrier_fixups(*types);
  195. do_stf_exit_barrier_fixups(*types);
  196. return 0;
  197. }
/* Public entry point: patch all STF barrier sites to the given type. */
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	/*
	 * The call to the fallback entry flush, and the fallback/sync-ori exit
	 * flush can not be safely patched in/out while other CPUs are executing
	 * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
	 * spin in the stop machine core with interrupts hard disabled.
	 */
	stop_machine(__do_stf_barrier_fixups, &types, NULL);
}
/*
 * Patch the user-access L1D flush sites (four instruction slots each)
 * according to @types.  The fourth slot defaults to blr — the flush
 * stub returns — and becomes a nop for the fallback flush so execution
 * falls through into the fallback code placed after the site.
 *
 * NOTE(review): unlike the stf/entry flush fixups, this is not called
 * under stop_machine() here — confirm callers guarantee quiescence.
 */
void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[4], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___uaccess_flush_fixup);
	end = PTRRELOC(&__stop___uaccess_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x4e800020; /* blr */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[3] = 0x60000000; /* nop */
		/* fallthrough to fallback flush */
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	/* i is reused below as the count of sites patched */
	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		patch_instruction((dest + 1), instrs[1]);
		patch_instruction((dest + 2), instrs[2]);
		patch_instruction((dest + 3), instrs[3]);
	}

	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)     ? "no" :
		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
		(types & L1D_FLUSH_ORI)       ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types & L1D_FLUSH_MTTRIG)    ? "mttrig type"
					      : "unknown");
}
/*
 * stop_machine() callback: patch the kernel entry L1D flush sites
 * (three instruction slots each) according to the flush type passed
 * via @data.  The fallback variant saves/restores LR around a
 * branch-and-link to entry_flush_fallback.
 */
static int __do_entry_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10 */
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10 */
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

	/* i is reused below as the count of sites patched */
	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		/* The fallback flush needs a real branch-and-link target */
		if (types == L1D_FLUSH_FALLBACK)
			patch_branch((dest + 1), (unsigned long)&entry_flush_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction((dest + 1), instrs[1]);

		patch_instruction((dest + 2), instrs[2]);
	}

	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)     ? "no" :
		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
		(types & L1D_FLUSH_ORI)       ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types & L1D_FLUSH_MTTRIG)    ? "mttrig type"
					      : "unknown");

	return 0;
}
/* Public entry point: patch all kernel entry flush sites to @types. */
void do_entry_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * The call to the fallback flush can not be safely patched in/out while
	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
	 * CPU while all other CPUs spin in the stop machine core with interrupts
	 * hard disabled.
	 */
	stop_machine(__do_entry_flush_fixups, &types, NULL);
}
  301. void do_rfi_flush_fixups(enum l1d_flush_type types)
  302. {
  303. unsigned int instrs[3], *dest;
  304. long *start, *end;
  305. int i;
  306. start = PTRRELOC(&__start___rfi_flush_fixup),
  307. end = PTRRELOC(&__stop___rfi_flush_fixup);
  308. instrs[0] = 0x60000000; /* nop */
  309. instrs[1] = 0x60000000; /* nop */
  310. instrs[2] = 0x60000000; /* nop */
  311. if (types & L1D_FLUSH_FALLBACK)
  312. /* b .+16 to fallback flush */
  313. instrs[0] = 0x48000010;
  314. i = 0;
  315. if (types & L1D_FLUSH_ORI) {
  316. instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
  317. instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
  318. }
  319. if (types & L1D_FLUSH_MTTRIG)
  320. instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
  321. for (i = 0; start < end; start++, i++) {
  322. dest = (void *)start + *start;
  323. pr_devel("patching dest %lx\n", (unsigned long)dest);
  324. patch_instruction(dest, instrs[0]);
  325. patch_instruction(dest + 1, instrs[1]);
  326. patch_instruction(dest + 2, instrs[2]);
  327. }
  328. printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
  329. (types == L1D_FLUSH_NONE) ? "no" :
  330. (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
  331. (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
  332. ? "ori+mttrig type"
  333. : "ori type" :
  334. (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
  335. : "unknown");
  336. }
  337. void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
  338. {
  339. unsigned int instr, *dest;
  340. long *start, *end;
  341. int i;
  342. start = fixup_start;
  343. end = fixup_end;
  344. instr = 0x60000000; /* nop */
  345. if (enable) {
  346. pr_info("barrier-nospec: using ORI speculation barrier\n");
  347. instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
  348. }
  349. for (i = 0; start < end; start++, i++) {
  350. dest = (void *)start + *start;
  351. pr_devel("patching dest %lx\n", (unsigned long)dest);
  352. patch_instruction(dest, instr);
  353. }
  354. printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
  355. }
  356. #endif /* CONFIG_PPC_BOOK3S_64 */
  357. #ifdef CONFIG_PPC_BARRIER_NOSPEC
  358. void do_barrier_nospec_fixups(bool enable)
  359. {
  360. void *start, *end;
  361. start = PTRRELOC(&__start___barrier_nospec_fixup),
  362. end = PTRRELOC(&__stop___barrier_nospec_fixup);
  363. do_barrier_nospec_fixups_range(enable, start, end);
  364. }
  365. #endif /* CONFIG_PPC_BARRIER_NOSPEC */
  366. #ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * FSL Book3E variant: each site has two instruction slots, patched to
 * isync;sync when the barrier is enabled, or two nops when disabled.
 */
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_INST_NOP;
	instr[1] = PPC_INST_NOP;

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_INST_ISYNC;
		instr[1] = PPC_INST_SYNC;
	}

	/* i counts the number of sites patched, for the printk below */
	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, instr[0]);
		patch_instruction(dest + 1, instr[1]);
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
  389. static void patch_btb_flush_section(long *curr)
  390. {
  391. unsigned int *start, *end;
  392. start = (void *)curr + *curr;
  393. end = (void *)curr + *(curr + 1);
  394. for (; start < end; start++) {
  395. pr_devel("patching dest %lx\n", (unsigned long)start);
  396. patch_instruction(start, PPC_INST_NOP);
  397. }
  398. }
  399. void do_btb_flush_fixups(void)
  400. {
  401. long *start, *end;
  402. start = PTRRELOC(&__start__btb_flush_fixup);
  403. end = PTRRELOC(&__stop__btb_flush_fixup);
  404. for (; start < end; start += 2)
  405. patch_btb_flush_section(start);
  406. }
  407. #endif /* CONFIG_PPC_FSL_BOOK3E */
  408. void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
  409. {
  410. long *start, *end;
  411. unsigned int *dest;
  412. if (!(value & CPU_FTR_LWSYNC))
  413. return ;
  414. start = fixup_start;
  415. end = fixup_end;
  416. for (; start < end; start++) {
  417. dest = (void *)start + *start;
  418. raw_patch_instruction(dest, PPC_INST_LWSYNC);
  419. }
  420. }
/*
 * For relocatable 64-bit kernels that are not running at physical
 * address zero, copy the already-patched text from the kernel's
 * runtime location down to the KERNELBASE alias, covering everything
 * from _stext up to __end_interrupts (the exception vectors).
 */
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	/* Running at address zero: the vectors are already in place */
	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	/* Copy one instruction word at a time via the patching API */
	while (length--) {
		raw_patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}
/* Feature words captured at patch time; check_features() verifies they
 * have not changed after the code patching has been applied. */
static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif
/*
 * Apply all boot-time code patching driven by the CPU, MMU and
 * firmware feature bits.  PTRRELOC() is used throughout because this
 * may run before the kernel has been relocated to its final address.
 */
void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	/* Snapshot the feature words so check_features() can spot changes */
	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}
/*
 * Switch the jump-label based feature checks over to their final
 * polarity, once the feature words are settled.
 */
void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}
/*
 * Late sanity check: warn if any feature word changed after the code
 * patching above ran, since the patched text would then be stale.
 */
static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);
  492. #ifdef CONFIG_FTR_FIXUP_SELFTEST
/* Minimal selftest assertion: log the failing line but keep running. */
#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);
/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;	/* scratch entry reused by each selftest */
  497. static long calc_offset(struct fixup_entry *entry, unsigned int *p)
  498. {
  499. return (unsigned long)p - (unsigned long)entry;
  500. }
/* Patch a section that has no alternative: it should be nopped out. */
static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1[];
	extern unsigned int end_ftr_fixup_test1[];
	extern unsigned int ftr_fixup_test1_orig[];
	extern unsigned int ftr_fixup_test1_expected[];
	int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
	/* Empty alternative: the section is simply replaced by nops */
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}
/* Patch a section with a same-size alternative and verify the result. */
static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
/*
 * The alternative is larger than the section it would replace:
 * patch_feature_section() must refuse and leave the code untouched.
 */
static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	/* Alternative is two instructions for a one-instruction slot */
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
/*
 * The alternative is smaller than the section: the leftover space must
 * be nopped out.  Also exercises a feature flag in the high bits of an
 * unsigned long.
 */
static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}
/*
 * The fixup for this fixture ran at boot; verify an alternative
 * containing an internal relative branch was copied correctly.
 */
static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5[];
	extern unsigned int end_ftr_fixup_test5[];
	extern unsigned int ftr_fixup_test5_expected[];
	int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);

	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}
/*
 * As above, but the alternative branches outside itself, so the branch
 * must have been translated for its new location during boot.
 */
static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6[];
	extern unsigned int end_ftr_fixup_test6[];
	extern unsigned int ftr_fixup_test6_expected[];
	int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);

	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}
/*
 * A branch to the label at the very end of the alternative section is
 * treated as internal (see patch_alt_instruction()); verify the
 * boot-time result matches the expectation.
 */
static void test_alternative_case_with_branch_to_end(void)
{
	extern unsigned int ftr_fixup_test7[];
	extern unsigned int end_ftr_fixup_test7[];
	extern unsigned int ftr_fixup_test7_expected[];
	int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);

	check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}
/* Verify the BEGIN/END_FTR_SECTION asm macros produced correct fixups. */
static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros[];
	extern u8 ftr_fixup_test_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FTR_macros_expected -
			     ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FTR_macros,
		     ftr_fixup_test_FTR_macros_expected, size) == 0);
}
/* Verify the firmware-feature section macros (64-bit only). */
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros[];
	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
			     ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}
/*
 * Verify the lwsync fixup: on CPUs with CPU_FTR_LWSYNC the fixture
 * must contain lwsync, otherwise plain sync.
 */
static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test[];
	extern u8 end_lwsync_fixup_test[];
	extern u8 lwsync_fixup_test_expected_LWSYNC[];
	extern u8 lwsync_fixup_test_expected_SYNC[];
	unsigned long size = end_lwsync_fixup_test -
			     lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}
/* Entry point: run all feature fixup selftests at late_initcall time. */
static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);
  684. #endif /* CONFIG_FTR_FIXUP_SELFTEST */