/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * bpf_jit.h: BPF JIT compiler for PPC
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *	     2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#ifndef __ASSEMBLY__

#include <asm/types.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64_ELF_ABI_V1
#define FUNCTION_DESCR_SIZE	24
#else
#define FUNCTION_DESCR_SIZE	0
#endif

#define CTX_NIA(ctx) ((unsigned long)ctx->idx * 4)

#define PLANT_INSTR(d, idx, instr) \
	do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
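
/*
 * Note: when the image pointer is NULL, PLANT_INSTR() only advances
 * ctx->idx. The same emit code can therefore be used both for a sizing
 * pass (count instructions, record offsets) and for the later pass that
 * actually writes instruction words into the image.
 */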

/* Long jump; (unconditional 'branch') */
#define PPC_JMP(dest) \
	do { \
		long offset = (long)(dest) - CTX_NIA(ctx); \
		if ((dest) != 0 && !is_offset_in_branch_range(offset)) { \
			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
			return -ERANGE; \
		} \
		EMIT(PPC_RAW_BRANCH(offset)); \
	} while (0)

/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest) \
	do { \
		long offset = (long)(dest) - CTX_NIA(ctx); \
		if ((dest) != 0 && !is_offset_in_cond_branch_range(offset)) { \
			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
			return -ERANGE; \
		} \
		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
	} while (0)

/* Sign-extended 32-bit immediate load */
#define PPC_LI32(d, i) do { \
		if ((int)(uintptr_t)(i) >= -32768 && \
				(int)(uintptr_t)(i) < 32768) \
			EMIT(PPC_RAW_LI(d, i)); \
		else { \
			EMIT(PPC_RAW_LIS(d, IMM_H(i))); \
			if (IMM_L(i)) \
				EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \
		} } while(0)
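
/*
 * Illustrative expansion: PPC_LI32(dst, 0x12345678) emits
 * "lis dst,0x1234; ori dst,dst,0x5678", while a small immediate such as
 * PPC_LI32(dst, -5) fits in 16 bits and emits a single "li dst,-5".
 */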

#ifdef CONFIG_PPC64
#define PPC_LI64(d, i) do { \
		if ((long)(i) >= -2147483648 && \
				(long)(i) < 2147483648) \
			PPC_LI32(d, i); \
		else { \
			if (!((uintptr_t)(i) & 0xffff800000000000ULL)) \
				EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) & \
						0xffff)); \
			else { \
				EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \
				if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
					EMIT(PPC_RAW_ORI(d, d, \
						((uintptr_t)(i) >> 32) & 0xffff)); \
			} \
			EMIT(PPC_RAW_SLDI(d, d, 32)); \
			if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
				EMIT(PPC_RAW_ORIS(d, d, \
					((uintptr_t)(i) >> 16) & 0xffff)); \
			if ((uintptr_t)(i) & 0x000000000000ffffULL) \
				EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & \
						0xffff)); \
		} } while (0)
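
/*
 * Worked example (illustrative): PPC_LI64(dst, 0x1234567890abcdefULL)
 * builds the value in up to five instructions:
 *	lis  dst,0x1234		; bits 63-48
 *	ori  dst,dst,0x5678	; bits 47-32
 *	sldi dst,dst,32		; shift into the upper word
 *	oris dst,dst,0x90ab	; bits 31-16
 *	ori  dst,dst,0xcdef	; bits 15-0
 * Values that fit in 32 bits fall back to PPC_LI32().
 */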
#endif

/*
 * The fly in the ointment of code size changing from pass to pass is
 * avoided by padding the short branch case with a NOP. If code size differs
 * with different branch reaches we will have the issue of code moving from
 * one pass to the next and will need a few passes to converge on a stable
 * state.
 */
#define PPC_BCC(cond, dest) do { \
		if (is_offset_in_cond_branch_range((long)(dest) - CTX_NIA(ctx))) { \
			PPC_BCC_SHORT(cond, dest); \
			EMIT(PPC_RAW_NOP()); \
		} else { \
			/* Flip the 'T or F' bit to invert comparison */ \
			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, CTX_NIA(ctx) + 2*4); \
			PPC_JMP(dest); \
		} } while(0)
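
/*
 * For example, if dest is out of conditional-branch range,
 * PPC_BCC(COND_EQ, dest) emits "bne +8; b dest" instead of
 * "beq dest; nop" -- two instructions either way, so the image size
 * does not depend on branch reach.
 */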

/* To create a branch condition, select a bit of cr0... */
#define CR0_LT		0
#define CR0_GT		1
#define CR0_EQ		2
/* ...and modify BO[3] */
#define COND_CMP_TRUE	0x100
#define COND_CMP_FALSE	0x000
/* Together, they make all required comparisons: */
#define COND_GT		(CR0_GT | COND_CMP_TRUE)
#define COND_GE		(CR0_LT | COND_CMP_FALSE)
#define COND_EQ		(CR0_EQ | COND_CMP_TRUE)
#define COND_NE		(CR0_EQ | COND_CMP_FALSE)
#define COND_LT		(CR0_LT | COND_CMP_TRUE)
#define COND_LE		(CR0_GT | COND_CMP_FALSE)
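
/*
 * For instance, COND_GT = (CR0_GT | COND_CMP_TRUE): BI selects the GT bit
 * of cr0, and the BO[3] bit turns "branch if condition false" into
 * "branch if condition true", so PPC_BCC_SHORT(COND_GT, dest) assembles
 * to a "bgt" to dest.
 */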

#define SEEN_FUNC	0x20000000 /* might call external helpers */
#define SEEN_TAILCALL	0x40000000 /* uses tail calls */

struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 0 to 2
	 *   encoded in SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;
	unsigned int stack_size;
	int b2p[MAX_BPF_JIT_REG + 2];
	unsigned int exentry_idx;
	unsigned int alt_exit_addr;
};

#define bpf_to_ppc(r)	(ctx->b2p[r])

#ifdef CONFIG_PPC32
#define BPF_FIXUP_LEN	3 /* Three instructions => 12 bytes */
#else
#define BPF_FIXUP_LEN	2 /* Two instructions => 8 bytes */
#endif

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return ctx->seen & (1 << (31 - i));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= 1 << (31 - i);
}

static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen &= ~(1 << (31 - i));
}
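
/*
 * Illustrative use: when the body generator emits code that touches a
 * mapped GPR, it calls bpf_set_seen_register(ctx, reg), which sets bit
 * (31 - reg) in ctx->seen; the prologue/epilogue can then skip saving
 * and restoring non-volatile registers whose bits are clear.
 */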

void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);

int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
			  struct codegen_context *ctx, int insn_idx,
			  int jmp_off, int dst_reg);
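
/*
 * Rough call sequence (illustrative; the real driver lives in
 * bpf_jit_comp.c): an initial bpf_jit_build_body() pass with a NULL
 * image sizes the program and fills addrs[], the prologue and epilogue
 * are sized the same way, and once an image is allocated the same
 * helpers run again to emit the final instructions, with extra passes
 * as needed for branch offsets to converge.
 */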

#endif /* __ASSEMBLY__ */

#endif /* _BPF_JIT_H */