code-patching.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/inst.h>
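
/*
 * Write a 32-bit (or, on ppc64, 64-bit) value through the patch mapping at
 * patch_addr, then flush the data cache line for the patch mapping and
 * invalidate the icache line for the real executable address so the updated
 * instruction becomes visible to instruction fetch.
 */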

static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
{
	if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) {
		/* For big endian correctness: plain address would use the wrong half */
		u32 val32 = val;

		__put_kernel_nofault(patch_addr, &val32, u32, failed);
	} else {
		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	mb();	/* sync */
	return -EPERM;
}

int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	if (ppc_inst_prefixed(instr))
		return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true);
	else
		return __patch_mem(addr, ppc_inst_val(instr), addr, false);
}
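
/*
 * Per-CPU patching state: either a reserved vmalloc area (init_mm based
 * patching) or a temporary mm (radix + SMP), together with the chosen poke
 * address and, for the vmalloc case, its kernel PTE.
 */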

struct patch_context {
	union {
		struct vm_struct *area;
		struct mm_struct *mm;
	};
	unsigned long addr;
	pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

static bool mm_patch_enabled(void)
{
	return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}

/*
 * The following applies for Radix MMU. Hash MMU has different requirements,
 * and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm.
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *orig_mm = current->active_mm;

	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(orig_mm, temp_mm, current);

	WARN_ON(!mm_is_thread_local(temp_mm));

	suspend_breakpoints();
	return orig_mm;
}

static void stop_using_temp_mm(struct mm_struct *temp_mm,
			       struct mm_struct *orig_mm)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(temp_mm, orig_mm, current);
	restore_breakpoints();
}
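
/*
 * CPU hotplug callbacks for the vmalloc flavour: reserve a per-CPU text poke
 * area, pre-allocate its page tables, and remember its address and kernel PTE.
 */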

static int text_area_cpu_up(unsigned int cpu)
{
	struct vm_struct *area;
	unsigned long addr;
	int err;

	area = get_vm_area(PAGE_SIZE, 0);
	if (!area) {
		WARN_ONCE(1, "Failed to create text area for cpu %d\n", cpu);
		return -1;
	}

	// Map/unmap the area to ensure all page tables are pre-allocated
	addr = (unsigned long)area->addr;
	err = map_patch_area(empty_zero_page, addr);
	if (err)
		return err;

	unmap_patch_area(addr);

	this_cpu_write(cpu_patching_context.area, area);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));

	return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
	free_vm_area(this_cpu_read(cpu_patching_context.area));
	this_cpu_write(cpu_patching_context.area, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);
	return 0;
}

static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
	mmput(mm);
}
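
/*
 * CPU hotplug callbacks for the temp-mm flavour: allocate a patching mm, pick
 * a randomised poke address, and pre-allocate its PTE so patching never has
 * to allocate memory with IRQs disabled.
 */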

static int text_area_cpu_up_mm(unsigned int cpu)
{
	struct mm_struct *mm;
	unsigned long addr;
	pte_t *pte;
	spinlock_t *ptl;

	mm = mm_alloc();
	if (WARN_ON(!mm))
		goto fail_no_mm;

	/*
	 * Choose a random page-aligned address from the interval
	 * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE].
	 * The lower address bound is PAGE_SIZE to avoid the zero-page.
	 */
	addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;

	/*
	 * PTE allocation uses GFP_KERNEL which means we need to
	 * pre-allocate the PTE here because we cannot do the
	 * allocation during patching when IRQs are disabled.
	 *
	 * Using get_locked_pte() to avoid open coding, the lock
	 * is unnecessary.
	 */
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto fail_no_pte;
	pte_unmap_unlock(pte, ptl);

	this_cpu_write(cpu_patching_context.mm, mm);
	this_cpu_write(cpu_patching_context.addr, addr);

	return 0;

fail_no_pte:
	put_patching_mm(mm, addr);
fail_no_mm:
	return -ENOMEM;
}

static int text_area_cpu_down_mm(unsigned int cpu)
{
	put_patching_mm(this_cpu_read(cpu_patching_context.mm),
			this_cpu_read(cpu_patching_context.addr));

	this_cpu_write(cpu_patching_context.mm, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);

	return 0;
}

static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);

void __init poking_init(void)
{
	int ret;

	if (mm_patch_enabled())
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke_mm:online",
					text_area_cpu_up_mm,
					text_area_cpu_down_mm);
	else
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke:online",
					text_area_cpu_up,
					text_area_cpu_down);

	/* cpuhp_setup_state returns >= 0 on success */
	if (WARN_ON(ret < 0))
		return;

	static_branch_enable(&poking_init_done);
}
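
/*
 * Module and other vmalloc-space text has to be translated with
 * vmalloc_to_pfn(); kernel text in the linear mapping can use __pa_symbol().
 */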

static unsigned long get_patch_pfn(void *addr)
{
	if (IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr))
		return vmalloc_to_pfn(addr);
	else
		return __pa_symbol(addr) >> PAGE_SHIFT;
}

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
	unsigned long pfn = get_patch_pfn(addr);

	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}

static void unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	p4d_t *p4dp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (WARN_ON(pgd_none(*pgdp)))
		return;

	p4dp = p4d_offset(pgdp, addr);
	if (WARN_ON(p4d_none(*p4dp)))
		return;

	pudp = pud_offset(p4dp, addr);
	if (WARN_ON(pud_none(*pudp)))
		return;

	pmdp = pmd_offset(pudp, addr);
	if (WARN_ON(pmd_none(*pmdp)))
		return;

	ptep = pte_offset_kernel(pmdp, addr);
	if (WARN_ON(pte_none(*ptep)))
		return;

	/*
	 * In hash, pte_clear flushes the tlb; in radix, we have to do it
	 * ourselves.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
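
/*
 * Patch one word/dword via a temporary mm: map the target page at the per-CPU
 * poke address inside the patching mm, switch to that mm, perform the write,
 * then switch back, clear the PTE and flush the local TLB entry.
 */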

static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);
	struct mm_struct *patching_mm;
	struct mm_struct *orig_mm;
	spinlock_t *ptl;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
	if (!pte)
		return -ENOMEM;

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync" ::: "memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_mem(addr, val, patch_addr, is_dword);

	/* context synchronisation performed by __patch_mem() (isync or exception) */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	pte_unmap_unlock(pte, ptl);

	return err;
}

static int __do_patch_mem(void *addr, unsigned long val, bool is_dword)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync" ::: "memory");

	err = __patch_mem(addr, val, patch_addr, is_dword);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}
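
/*
 * Patch one word/dword via the per-CPU vmalloc poke area in init_mm; used
 * when temp-mm patching is not available (hash MMU or !SMP).
 */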

static int patch_mem(void *addr, unsigned long val, bool is_dword)
{
	int err;
	unsigned long flags;

	/*
	 * During early boot patch_instruction is called
	 * when text_poke_area is not ready, but we still need
	 * to allow patching. We just do the plain old patching.
	 */
	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
	    !static_branch_likely(&poking_init_done))
		return __patch_mem(addr, val, addr, is_dword);

	local_irq_save(flags);
	if (mm_patch_enabled())
		err = __do_patch_mem_mm(addr, val, is_dword);
	else
		err = __do_patch_mem(addr, val, is_dword);
	local_irq_restore(flags);

	return err;
}

#ifdef CONFIG_PPC64
int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	if (ppc_inst_prefixed(instr))
		return patch_mem(addr, ppc_inst_as_ulong(instr), true);
	else
		return patch_mem(addr, ppc_inst_val(instr), false);
}
NOKPROBE_SYMBOL(patch_instruction);

int patch_uint(void *addr, unsigned int val)
{
	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
		return -EINVAL;

	return patch_mem(addr, val, false);
}
NOKPROBE_SYMBOL(patch_uint);

int patch_ulong(void *addr, unsigned long val)
{
	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
		return -EINVAL;

	return patch_mem(addr, val, true);
}
NOKPROBE_SYMBOL(patch_ulong);

#else

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return patch_mem(addr, ppc_inst_val(instr), false);
}
NOKPROBE_SYMBOL(patch_instruction);

#endif
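
/*
 * Typical usage (illustrative sketch, not from this file): replace a patched
 * site with a no-op, e.g.
 *	patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
 */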

static int patch_memset64(u64 *addr, u64 val, size_t count)
{
	for (u64 *end = addr + count; addr < end; addr++)
		__put_kernel_nofault(addr, &val, u64, failed);

	return 0;

failed:
	return -EPERM;
}

static int patch_memset32(u32 *addr, u32 val, size_t count)
{
	for (u32 *end = addr + count; addr < end; addr++)
		__put_kernel_nofault(addr, &val, u32, failed);

	return 0;

failed:
	return -EPERM;
}
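
/*
 * Copy (or replicate) instructions into the poke mapping, then publish the
 * stores and flush the icache range covering the patched bytes.
 */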

static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
{
	unsigned long start = (unsigned long)patch_addr;
	int err;

	/* Repeat instruction */
	if (repeat_instr) {
		ppc_inst_t instr = ppc_inst_read(code);

		if (ppc_inst_prefixed(instr)) {
			u64 val = ppc_inst_as_ulong(instr);

			err = patch_memset64((u64 *)patch_addr, val, len / 8);
		} else {
			u32 val = ppc_inst_val(instr);

			err = patch_memset32(patch_addr, val, len / 4);
		}
	} else {
		err = copy_to_kernel_nofault(patch_addr, code, len);
	}

	smp_wmb();	/* smp write barrier */
	flush_icache_range(start, start + len);

	return err;
}

/*
 * A page is mapped and instructions that fit the page are patched.
 * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
 */
static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	struct mm_struct *patching_mm, *orig_mm;
	unsigned long pfn = get_patch_pfn(addr);
	unsigned long text_poke_addr;
	spinlock_t *ptl;
	u32 *patch_addr;
	pte_t *pte;
	int err;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
	if (!pte)
		return -ENOMEM;

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync" ::: "memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	kasan_disable_current();
	err = __patch_instructions(patch_addr, code, len, repeat_instr);
	kasan_enable_current();

	/* context synchronisation performed by __patch_instructions */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	pte_unmap_unlock(pte, ptl);

	return err;
}

/*
 * A page is mapped and instructions that fit the page are patched.
 * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
 */
static int __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	unsigned long pfn = get_patch_pfn(addr);
	unsigned long text_poke_addr;
	u32 *patch_addr;
	pte_t *pte;
	int err;

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync" ::: "memory");

	err = __patch_instructions(patch_addr, code, len, repeat_instr);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

/*
 * Patch 'addr' with 'len' bytes of instructions from 'code'.
 *
 * If repeat_instr is true, the same instruction is filled for
 * 'len' bytes.
 */
int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	while (len > 0) {
		unsigned long flags;
		size_t plen;
		int err;

		plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);

		local_irq_save(flags);
		if (mm_patch_enabled())
			err = __do_patch_instructions_mm(addr, code, plen, repeat_instr);
		else
			err = __do_patch_instructions(addr, code, plen, repeat_instr);
		local_irq_restore(flags);
		if (err)
			return err;

		len -= plen;
		addr = (u32 *)((unsigned long)addr + plen);
		if (!repeat_instr)
			code = (u32 *)((unsigned long)code + plen);
	}

	return 0;
}
NOKPROBE_SYMBOL(patch_instructions);
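
/*
 * Illustrative sketch (not from this file): redirect a call site to a new
 * target while keeping the link bit set, e.g.
 *	patch_branch(addr, (unsigned long)new_handler, BRANCH_SET_LINK);
 * 'new_handler' is a hypothetical function used only for illustration.
 */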

int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}

/*
 * Helper to check if a given instruction is a conditional branch
 * Derived from the conditional checks in analyse_instr()
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);
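
/*
 * Build a B-form conditional branch (primary opcode 16). 'flags' supplies the
 * BO/BI fields and the AA/LK bits; the 16-bit displacement must be within
 * conditional-branch range of 'addr' unless BRANCH_ABSOLUTE is set.
 */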

int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags)
{
	long offset;

	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_cond_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));

	return 0;
}

int instr_is_relative_branch(ppc_inst_t instr)
{
	if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
		return 0;

	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(ppc_inst_t instr)
{
	return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

static unsigned long branch_bform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

unsigned long branch_target(const u32 *instr)
{
	if (instr_is_branch_iform(ppc_inst_read(instr)))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(ppc_inst_read(instr)))
		return branch_bform_target(instr);

	return 0;
}
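
/*
 * Recompute a relative branch found at 'src' so that, when written at 'dest',
 * it still resolves to the same target; returns non-zero if the new offset is
 * out of range for that branch form.
 */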

int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
	unsigned long target;

	target = branch_target(src);

	if (instr_is_branch_iform(ppc_inst_read(src)))
		return create_branch(instr, dest, target,
				     ppc_inst_val(ppc_inst_read(src)));
	else if (instr_is_branch_bform(ppc_inst_read(src)))
		return create_cond_branch(instr, dest, target,
					  ppc_inst_val(ppc_inst_read(src)));

	return 1;
}