/*
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 * compiler.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <asm/asm.h>
#include <asm/isa-rev.h>
#include <asm/regdef.h>
#include "bpf_jit.h"

/* ABI
 *
 * r_skb_hl	skb header length
 * r_skb_data	skb data
 * r_off(a1)	offset register
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb(a0)	*skb
 * r_M		*scratch memory
 * r_skb_len	skb length
 * r_s0		Scratch register 0
 * r_s1		Scratch register 1
 *
 * On entry:
 * a0: *skb
 * a1: offset (imm or imm + X)
 *
 * All non-BPF-ABI registers are free for use. On return, we only
 * care about r_ret. The BPF-ABI registers are assumed to remain
 * unmodified during the entire filter operation.
 */
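
/*
 * Caller-side sketch, for illustration only. The real call sites are
 * emitted by the JIT in bpf_jit.c and the exact instruction selection
 * there differs; abort_filter is a placeholder label:
 *
 *	li	a1, imm			# a1/offset = imm (+ X for IND loads)
 *	jal	sk_load_word		# a0/skb already holds *skb
 *	 nop				# delay slot
 *	bnez	$r_ret, abort_filter	# non-zero r_ret means the load faulted
 */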

#define skb	a0
#define offset	a1
#define SKF_LL_OFF	(-0x200000) /* Can't include linux/filter.h in assembly */

	/* We know better :) so prevent assembler reordering etc */
	.set	noreorder

#define is_offset_negative(TYPE)				\
	/* If offset is negative we have more work to do */	\
	slti	t0, offset, 0;					\
	bgtz	t0, bpf_slow_path_##TYPE##_neg;			\
	/* Be careful what follows in DS. */

#define is_offset_in_header(SIZE, TYPE)				\
	/* Reading from header? */				\
	addiu	$r_s0, $r_skb_hl, -SIZE;			\
	slt	t0, $r_s0, offset;				\
	bgtz	t0, bpf_slow_path_##TYPE;			\

LEAF(sk_load_word)
	is_offset_negative(word)
FEXPORT(sk_load_word_positive)
	is_offset_in_header(4, word)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	.set	reorder
	lw	$r_A, 0(t1)
	.set	noreorder
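	/*
	 * Packet bytes are in network (big-endian) order, so on a
	 * little-endian core the loaded word must be byte-swapped:
	 * MIPS32r2+ does it in two instructions (WSBH + ROTR), pre-r2
	 * ISAs need the shift-and-mask sequence in the #else branch.
	 */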
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	t0, $r_A
	rotr	$r_A, t0, 16
# else
	sll	t0, $r_A, 24
	srl	t1, $r_A, 24
	srl	t2, $r_A, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_A, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	or	$r_A, t0, t1
# endif
#endif
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_word)

LEAF(sk_load_half)
	is_offset_negative(half)
FEXPORT(sk_load_half_positive)
	is_offset_in_header(2, half)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lhu	$r_A, 0(t1)
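	/*
	 * Same endianness fix-up as in sk_load_word, but only the two
	 * bytes of the halfword are involved, so WSBH alone suffices
	 * on MIPS32r2+.
	 */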
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	$r_A, $r_A
# else
	sll	t0, $r_A, 8
	srl	t1, $r_A, 8
	andi	t0, t0, 0xff00
	or	$r_A, t0, t1
# endif
#endif
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_half)

LEAF(sk_load_byte)
	is_offset_negative(byte)
FEXPORT(sk_load_byte_positive)
	is_offset_in_header(1, byte)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lbu	$r_A, 0(t1)
	jr	$r_ra
	 move	$r_ret, zero
	END(sk_load_byte)

/*
 * call skb_copy_bits:
 * (prototype in linux/skbuff.h)
 *
 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
 *
 * o32 mandates that the caller reserve 4 stack slots for the argument
 * registers in case the callee needs to spill them. Even though we
 * never touch those slots ourselves, we must allocate them to remain
 * ABI compliant, since the callee is entitled to use them.
 * We also allocate 2 more slots, for $r_ra and for our return
 * register (*to).
 *
 * n64 is a bit different. The *caller* will allocate the space to
 * preserve the arguments. So in 64-bit kernels we allocate the 4-slot
 * argument area for no good reason, but the waste is harmless.
 *
 * (void *to) is returned in r_s0
 */
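
/*
 * The resulting frame, as a sketch (6 * SZREG bytes, offsets taken
 * from $r_sp after the adjustment below):
 *
 *	(5 * SZREG)		saved $r_ra
 *	(4 * SZREG)		destination slot passed as *to
 *	(0 .. 3) * SZREG	o32 argument save area for the callee
 */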

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define DS_OFFSET(SIZE) (4 * SZREG)
#else
#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
#endif
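
/*
 * Why the (4 - SIZE) adjustment: skb_copy_bits() stores SIZE bytes at
 * *to, but the result is read back as a full word with INT_L. On
 * big-endian the copied bytes must therefore land at the
 * least-significant (highest-addressed) end of the destination slot;
 * on little-endian they are already there.
 */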

#define bpf_slow_path_common(SIZE)				\
	/* Quick check. Are we within reasonable boundaries? */ \
	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE;		\
	sltu		$r_s0, offset, $r_s1;			\
	beqz		$r_s0, fault;				\
	/* Load 4th argument in DS */				\
	LONG_ADDIU	a3, zero, SIZE;				\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);		\
	PTR_LA		t0, skb_copy_bits;			\
	PTR_S		$r_ra, (5 * SZREG)($r_sp);		\
	/* Assign low slot to a2 */				\
	PTR_ADDIU	a2, $r_sp, DS_OFFSET(SIZE);		\
	jalr		t0;					\
	/* Reset our destination slot (DS but it's ok) */	\
	INT_S		zero, (4 * SZREG)($r_sp);		\
	/*							\
	 * skb_copy_bits returns 0 on success and -EFAULT	\
	 * on error. Our data live in a2. Do not bother with	\
	 * our data if an error has been returned.		\
	 */							\
	/* Restore our frame */					\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);		\
	INT_L		$r_s0, (4 * SZREG)($r_sp);		\
	bltz		v0, fault;				\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;		\
	move		$r_ret, zero;				\

NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
	bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	t0, $r_s0
	jr	$r_ra
	 rotr	$r_A, t0, 16
# else
	sll	t0, $r_s0, 24
	srl	t1, $r_s0, 24
	srl	t2, $r_s0, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_s0, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	jr	$r_ra
	 or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	 move	$r_A, $r_s0
#endif
	END(bpf_slow_path_word)

NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
	bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	jr	$r_ra
	 wsbh	$r_A, $r_s0
# else
	sll	t0, $r_s0, 8
	andi	t1, $r_s0, 0xff00
	andi	t0, t0, 0xff00
	srl	t1, t1, 8
	jr	$r_ra
	 or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	 move	$r_A, $r_s0
#endif
	END(bpf_slow_path_half)

NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
	bpf_slow_path_common(1)
	jr	$r_ra
	 move	$r_A, $r_s0
	END(bpf_slow_path_byte)

/*
 * Negative entry points
 */
	.macro bpf_is_end_of_data
	li	t0, SKF_LL_OFF
	/* Reading link layer data? */
	slt	t1, offset, t0
	bgtz	t1, fault
	/* Be careful what follows in DS. */
	.endm
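
/*
 * All valid special negative offsets (SKF_LL_OFF, SKF_NET_OFF and
 * friends) lie in [SKF_LL_OFF, 0), so anything below SKF_LL_OFF can
 * never resolve to packet data; the macro above rejects it early
 * instead of calling the helper.
 */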

/*
 * call bpf_internal_load_pointer_neg_helper:
 * (prototype in linux/filter.h)
 *
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *					      int k, unsigned int size)
 *
 * see above (bpf_slow_path_common) for ABI restrictions
 */
#define bpf_negative_common(SIZE)				\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);		\
	PTR_LA		t0, bpf_internal_load_pointer_neg_helper; \
	PTR_S		$r_ra, (5 * SZREG)($r_sp);		\
	jalr		t0;					\
	 li		a2, SIZE;				\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);		\
	/* Check return pointer */				\
	beqz		v0, fault;				\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;		\
	/* Preserve our pointer */				\
	move		$r_s0, v0;				\
	/* Set return value */					\
	move		$r_ret, zero;				\

bpf_slow_path_word_neg:
	bpf_is_end_of_data
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(4)
	jr	$r_ra
	 lw	$r_A, 0($r_s0)
	END(sk_load_word_negative)

bpf_slow_path_half_neg:
	bpf_is_end_of_data
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(2)
	jr	$r_ra
	 lhu	$r_A, 0($r_s0)
	END(sk_load_half_negative)

bpf_slow_path_byte_neg:
	bpf_is_end_of_data
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(1)
	jr	$r_ra
	 lbu	$r_A, 0($r_s0)
	END(sk_load_byte_negative)

fault:
	jr	$r_ra
	 addiu	$r_ret, zero, 1