/* test_align.c - eBPF verifier alignment-tracking selftest */
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#include "../../../include/linux/filter.h"
#include "bpf_rlimit.h"
#include "bpf_util.h"
  19. #define MAX_INSNS 512
  20. #define MAX_MATCHES 16
  21. struct bpf_reg_match {
  22. unsigned int line;
  23. const char *match;
  24. };
  25. struct bpf_align_test {
  26. const char *descr;
  27. struct bpf_insn insns[MAX_INSNS];
  28. enum {
  29. UNDEF,
  30. ACCEPT,
  31. REJECT
  32. } result;
  33. enum bpf_prog_type prog_type;
  34. /* Matches must be in order of increasing line */
  35. struct bpf_reg_match matches[MAX_MATCHES];
  36. };
  37. static struct bpf_align_test tests[] = {
  38. /* Four tests of known constants. These aren't staggeringly
  39. * interesting since we track exact values now.
  40. */
  41. {
  42. .descr = "mov",
  43. .insns = {
  44. BPF_MOV64_IMM(BPF_REG_3, 2),
  45. BPF_MOV64_IMM(BPF_REG_3, 4),
  46. BPF_MOV64_IMM(BPF_REG_3, 8),
  47. BPF_MOV64_IMM(BPF_REG_3, 16),
  48. BPF_MOV64_IMM(BPF_REG_3, 32),
  49. BPF_MOV64_IMM(BPF_REG_0, 0),
  50. BPF_EXIT_INSN(),
  51. },
  52. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  53. .matches = {
  54. {1, "R1=ctx(id=0,off=0,imm=0)"},
  55. {1, "R10=fp0"},
  56. {1, "R3_w=inv2"},
  57. {2, "R3_w=inv4"},
  58. {3, "R3_w=inv8"},
  59. {4, "R3_w=inv16"},
  60. {5, "R3_w=inv32"},
  61. },
  62. },
  63. {
  64. .descr = "shift",
  65. .insns = {
  66. BPF_MOV64_IMM(BPF_REG_3, 1),
  67. BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
  68. BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
  69. BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
  70. BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
  71. BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
  72. BPF_MOV64_IMM(BPF_REG_4, 32),
  73. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
  74. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
  75. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
  76. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
  77. BPF_MOV64_IMM(BPF_REG_0, 0),
  78. BPF_EXIT_INSN(),
  79. },
  80. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  81. .matches = {
  82. {1, "R1=ctx(id=0,off=0,imm=0)"},
  83. {1, "R10=fp0"},
  84. {1, "R3_w=inv1"},
  85. {2, "R3_w=inv2"},
  86. {3, "R3_w=inv4"},
  87. {4, "R3_w=inv8"},
  88. {5, "R3_w=inv16"},
  89. {6, "R3_w=inv1"},
  90. {7, "R4_w=inv32"},
  91. {8, "R4_w=inv16"},
  92. {9, "R4_w=inv8"},
  93. {10, "R4_w=inv4"},
  94. {11, "R4_w=inv2"},
  95. },
  96. },
  97. {
  98. .descr = "addsub",
  99. .insns = {
  100. BPF_MOV64_IMM(BPF_REG_3, 4),
  101. BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
  102. BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
  103. BPF_MOV64_IMM(BPF_REG_4, 8),
  104. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  105. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
  106. BPF_MOV64_IMM(BPF_REG_0, 0),
  107. BPF_EXIT_INSN(),
  108. },
  109. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  110. .matches = {
  111. {1, "R1=ctx(id=0,off=0,imm=0)"},
  112. {1, "R10=fp0"},
  113. {1, "R3_w=inv4"},
  114. {2, "R3_w=inv8"},
  115. {3, "R3_w=inv10"},
  116. {4, "R4_w=inv8"},
  117. {5, "R4_w=inv12"},
  118. {6, "R4_w=inv14"},
  119. },
  120. },
  121. {
  122. .descr = "mul",
  123. .insns = {
  124. BPF_MOV64_IMM(BPF_REG_3, 7),
  125. BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
  126. BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
  127. BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
  128. BPF_MOV64_IMM(BPF_REG_0, 0),
  129. BPF_EXIT_INSN(),
  130. },
  131. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  132. .matches = {
  133. {1, "R1=ctx(id=0,off=0,imm=0)"},
  134. {1, "R10=fp0"},
  135. {1, "R3_w=inv7"},
  136. {2, "R3_w=inv7"},
  137. {3, "R3_w=inv14"},
  138. {4, "R3_w=inv56"},
  139. },
  140. },
  141. /* Tests using unknown values */
  142. #define PREP_PKT_POINTERS \
  143. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
  144. offsetof(struct __sk_buff, data)), \
  145. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
  146. offsetof(struct __sk_buff, data_end))
  147. #define LOAD_UNKNOWN(DST_REG) \
  148. PREP_PKT_POINTERS, \
  149. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
  150. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
  151. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
  152. BPF_EXIT_INSN(), \
  153. BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)
  154. {
  155. .descr = "unknown shift",
  156. .insns = {
  157. LOAD_UNKNOWN(BPF_REG_3),
  158. BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
  159. BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
  160. BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
  161. BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
  162. LOAD_UNKNOWN(BPF_REG_4),
  163. BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
  164. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
  165. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
  166. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
  167. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
  168. BPF_MOV64_IMM(BPF_REG_0, 0),
  169. BPF_EXIT_INSN(),
  170. },
  171. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  172. .matches = {
  173. {7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
  174. {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  175. {8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
  176. {9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  177. {10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
  178. {11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
  179. {18, "R3=pkt_end(id=0,off=0,imm=0)"},
  180. {18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  181. {19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
  182. {20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
  183. {21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
  184. {22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  185. {23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
  186. },
  187. },
  188. {
  189. .descr = "unknown mul",
  190. .insns = {
  191. LOAD_UNKNOWN(BPF_REG_3),
  192. BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
  193. BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
  194. BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
  195. BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
  196. BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
  197. BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
  198. BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
  199. BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
  200. BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
  201. BPF_MOV64_IMM(BPF_REG_0, 0),
  202. BPF_EXIT_INSN(),
  203. },
  204. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  205. .matches = {
  206. {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  207. {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  208. {9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  209. {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  210. {11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
  211. {12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  212. {13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  213. {14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  214. {15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
  215. {16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
  216. },
  217. },
  218. {
  219. .descr = "packet const offset",
  220. .insns = {
  221. PREP_PKT_POINTERS,
  222. BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
  223. BPF_MOV64_IMM(BPF_REG_0, 0),
  224. /* Skip over ethernet header. */
  225. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
  226. BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
  227. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  228. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  229. BPF_EXIT_INSN(),
  230. BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
  231. BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
  232. BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
  233. BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
  234. BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
  235. BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
  236. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
  237. BPF_MOV64_IMM(BPF_REG_0, 0),
  238. BPF_EXIT_INSN(),
  239. },
  240. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  241. .matches = {
  242. {4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
  243. {5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
  244. {6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
  245. {10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
  246. {10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
  247. {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
  248. {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
  249. {15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
  250. },
  251. },
  252. {
  253. .descr = "packet variable offset",
  254. .insns = {
  255. LOAD_UNKNOWN(BPF_REG_6),
  256. BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
  257. /* First, add a constant to the R5 packet pointer,
  258. * then a variable with a known alignment.
  259. */
  260. BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
  261. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
  262. BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
  263. BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
  264. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  265. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  266. BPF_EXIT_INSN(),
  267. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
  268. /* Now, test in the other direction. Adding first
  269. * the variable offset to R5, then the constant.
  270. */
  271. BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
  272. BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
  273. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
  274. BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
  275. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  276. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  277. BPF_EXIT_INSN(),
  278. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
  279. /* Test multiple accumulations of unknown values
  280. * into a packet pointer.
  281. */
  282. BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
  283. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
  284. BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
  285. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
  286. BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
  287. BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
  288. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  289. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  290. BPF_EXIT_INSN(),
  291. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
  292. BPF_MOV64_IMM(BPF_REG_0, 0),
  293. BPF_EXIT_INSN(),
  294. },
  295. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  296. .matches = {
  297. /* Calculated offset in R6 has unknown value, but known
  298. * alignment of 4.
  299. */
  300. {8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
  301. {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  302. /* Offset is added to packet pointer R5, resulting in
  303. * known fixed offset, and variable offset from R6.
  304. */
  305. {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  306. /* At the time the word size load is performed from R5,
  307. * it's total offset is NET_IP_ALIGN + reg->off (0) +
  308. * reg->aux_off (14) which is 16. Then the variable
  309. * offset is considered using reg->aux_off_align which
  310. * is 4 and meets the load's requirements.
  311. */
  312. {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
  313. {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
  314. /* Variable offset is added to R5 packet pointer,
  315. * resulting in auxiliary alignment of 4.
  316. */
  317. {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  318. /* Constant offset is added to R5, resulting in
  319. * reg->off of 14.
  320. */
  321. {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  322. /* At the time the word size load is performed from R5,
  323. * its total fixed offset is NET_IP_ALIGN + reg->off
  324. * (14) which is 16. Then the variable offset is 4-byte
  325. * aligned, so the total offset is 4-byte aligned and
  326. * meets the load's requirements.
  327. */
  328. {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
  329. {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
  330. /* Constant offset is added to R5 packet pointer,
  331. * resulting in reg->off value of 14.
  332. */
  333. {26, "R5_w=pkt(id=0,off=14,r=8"},
  334. /* Variable offset is added to R5, resulting in a
  335. * variable offset of (4n).
  336. */
  337. {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  338. /* Constant is added to R5 again, setting reg->off to 18. */
  339. {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  340. /* And once more we add a variable; resulting var_off
  341. * is still (4n), fixed offset is not changed.
  342. * Also, we create a new reg->id.
  343. */
  344. {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
  345. /* At the time the word size load is performed from R5,
  346. * its total fixed offset is NET_IP_ALIGN + reg->off (18)
  347. * which is 20. Then the variable offset is (4n), so
  348. * the total offset is 4-byte aligned and meets the
  349. * load's requirements.
  350. */
  351. {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
  352. {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
  353. },
  354. },
  355. {
  356. .descr = "packet variable offset 2",
  357. .insns = {
  358. /* Create an unknown offset, (4n+2)-aligned */
  359. LOAD_UNKNOWN(BPF_REG_6),
  360. BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
  361. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
  362. /* Add it to the packet pointer */
  363. BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
  364. BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
  365. /* Check bounds and perform a read */
  366. BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
  367. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  368. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  369. BPF_EXIT_INSN(),
  370. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
  371. /* Make a (4n) offset from the value we just read */
  372. BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
  373. BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
  374. /* Add it to the packet pointer */
  375. BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
  376. /* Check bounds and perform a read */
  377. BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
  378. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  379. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  380. BPF_EXIT_INSN(),
  381. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
  382. BPF_MOV64_IMM(BPF_REG_0, 0),
  383. BPF_EXIT_INSN(),
  384. },
  385. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  386. .matches = {
  387. /* Calculated offset in R6 has unknown value, but known
  388. * alignment of 4.
  389. */
  390. {8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
  391. {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  392. /* Adding 14 makes R6 be (4n+2) */
  393. {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
  394. /* Packet pointer has (4n+2) offset */
  395. {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
  396. {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
  397. /* At the time the word size load is performed from R5,
  398. * its total fixed offset is NET_IP_ALIGN + reg->off (0)
  399. * which is 2. Then the variable offset is (4n+2), so
  400. * the total offset is 4-byte aligned and meets the
  401. * load's requirements.
  402. */
  403. {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
  404. /* Newly read value in R6 was shifted left by 2, so has
  405. * known alignment of 4.
  406. */
  407. {18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  408. /* Added (4n) to packet pointer's (4n+2) var_off, giving
  409. * another (4n+2).
  410. */
  411. {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
  412. {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
  413. /* At the time the word size load is performed from R5,
  414. * its total fixed offset is NET_IP_ALIGN + reg->off (0)
  415. * which is 2. Then the variable offset is (4n+2), so
  416. * the total offset is 4-byte aligned and meets the
  417. * load's requirements.
  418. */
  419. {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
  420. },
  421. },
  422. {
  423. .descr = "dubious pointer arithmetic",
  424. .insns = {
  425. PREP_PKT_POINTERS,
  426. BPF_MOV64_IMM(BPF_REG_0, 0),
  427. /* (ptr - ptr) << 2 */
  428. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  429. BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
  430. BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
  431. /* We have a (4n) value. Let's make a packet offset
  432. * out of it. First add 14, to make it a (4n+2)
  433. */
  434. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
  435. /* Then make sure it's nonnegative */
  436. BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
  437. BPF_EXIT_INSN(),
  438. /* Add it to packet pointer */
  439. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  440. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  441. /* Check bounds and perform a read */
  442. BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
  443. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  444. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  445. BPF_EXIT_INSN(),
  446. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
  447. BPF_EXIT_INSN(),
  448. },
  449. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  450. .result = REJECT,
  451. .matches = {
  452. {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
  453. /* (ptr - ptr) << 2 == unknown, (4n) */
  454. {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
  455. /* (4n) + 14 == (4n+2). We blow our bounds, because
  456. * the add could overflow.
  457. */
  458. {7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
  459. /* Checked s>=0 */
  460. {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
  461. /* packet pointer + nonnegative (4n+2) */
  462. {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
  463. {13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
  464. /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
  465. * We checked the bounds, but it might have been able
  466. * to overflow if the packet pointer started in the
  467. * upper half of the address space.
  468. * So we did not get a 'range' on R6, and the access
  469. * attempt will fail.
  470. */
  471. {15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
  472. }
  473. },
  474. {
  475. .descr = "variable subtraction",
  476. .insns = {
  477. /* Create an unknown offset, (4n+2)-aligned */
  478. LOAD_UNKNOWN(BPF_REG_6),
  479. BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
  480. BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
  481. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
  482. /* Create another unknown, (4n)-aligned, and subtract
  483. * it from the first one
  484. */
  485. BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
  486. BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
  487. /* Bounds-check the result */
  488. BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
  489. BPF_EXIT_INSN(),
  490. /* Add it to the packet pointer */
  491. BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
  492. BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
  493. /* Check bounds and perform a read */
  494. BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
  495. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  496. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  497. BPF_EXIT_INSN(),
  498. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
  499. BPF_EXIT_INSN(),
  500. },
  501. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  502. .matches = {
  503. /* Calculated offset in R6 has unknown value, but known
  504. * alignment of 4.
  505. */
  506. {7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
  507. {9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  508. /* Adding 14 makes R6 be (4n+2) */
  509. {10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
  510. /* New unknown value in R7 is (4n) */
  511. {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
  512. /* Subtracting it from R6 blows our unsigned bounds */
  513. {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
  514. /* Checked s>= 0 */
  515. {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
  516. /* At the time the word size load is performed from R5,
  517. * its total fixed offset is NET_IP_ALIGN + reg->off (0)
  518. * which is 2. Then the variable offset is (4n+2), so
  519. * the total offset is 4-byte aligned and meets the
  520. * load's requirements.
  521. */
  522. {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
  523. },
  524. },
  525. {
  526. .descr = "pointer variable subtraction",
  527. .insns = {
  528. /* Create an unknown offset, (4n+2)-aligned and bounded
  529. * to [14,74]
  530. */
  531. LOAD_UNKNOWN(BPF_REG_6),
  532. BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
  533. BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
  534. BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
  535. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
  536. /* Subtract it from the packet pointer */
  537. BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
  538. BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
  539. /* Create another unknown, (4n)-aligned and >= 74.
  540. * That in fact means >= 76, since 74 % 4 == 2
  541. */
  542. BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
  543. BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
  544. /* Add it to the packet pointer */
  545. BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
  546. /* Check bounds and perform a read */
  547. BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
  548. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  549. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
  550. BPF_EXIT_INSN(),
  551. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
  552. BPF_EXIT_INSN(),
  553. },
  554. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  555. .matches = {
  556. /* Calculated offset in R6 has unknown value, but known
  557. * alignment of 4.
  558. */
  559. {7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
  560. {10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
  561. /* Adding 14 makes R6 be (4n+2) */
  562. {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
  563. /* Subtracting from packet pointer overflows ubounds */
  564. {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
  565. /* New unknown value in R7 is (4n), >= 76 */
  566. {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
  567. /* Adding it to packet pointer gives nice bounds again */
  568. {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
  569. /* At the time the word size load is performed from R5,
  570. * its total fixed offset is NET_IP_ALIGN + reg->off (0)
  571. * which is 2. Then the variable offset is (4n+2), so
  572. * the total offset is 4-byte aligned and meets the
  573. * load's requirements.
  574. */
  575. {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
  576. },
  577. },
  578. };
  579. static int probe_filter_length(const struct bpf_insn *fp)
  580. {
  581. int len;
  582. for (len = MAX_INSNS - 1; len > 0; --len)
  583. if (fp[len].code != 0 || fp[len].imm != 0)
  584. break;
  585. return len + 1;
  586. }
  587. static char bpf_vlog[32768];
  588. static int do_test_single(struct bpf_align_test *test)
  589. {
  590. struct bpf_insn *prog = test->insns;
  591. int prog_type = test->prog_type;
  592. char bpf_vlog_copy[32768];
  593. const char *line_ptr;
  594. int cur_line = -1;
  595. int prog_len, i;
  596. int fd_prog;
  597. int ret;
  598. prog_len = probe_filter_length(prog);
  599. fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
  600. prog, prog_len, 1, "GPL", 0,
  601. bpf_vlog, sizeof(bpf_vlog), 2);
  602. if (fd_prog < 0 && test->result != REJECT) {
  603. printf("Failed to load program.\n");
  604. printf("%s", bpf_vlog);
  605. ret = 1;
  606. } else if (fd_prog >= 0 && test->result == REJECT) {
  607. printf("Unexpected success to load!\n");
  608. printf("%s", bpf_vlog);
  609. ret = 1;
  610. close(fd_prog);
  611. } else {
  612. ret = 0;
  613. /* We make a local copy so that we can strtok() it */
  614. strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
  615. line_ptr = strtok(bpf_vlog_copy, "\n");
  616. for (i = 0; i < MAX_MATCHES; i++) {
  617. struct bpf_reg_match m = test->matches[i];
  618. if (!m.match)
  619. break;
  620. while (line_ptr) {
  621. cur_line = -1;
  622. sscanf(line_ptr, "%u: ", &cur_line);
  623. if (cur_line == m.line)
  624. break;
  625. line_ptr = strtok(NULL, "\n");
  626. }
  627. if (!line_ptr) {
  628. printf("Failed to find line %u for match: %s\n",
  629. m.line, m.match);
  630. ret = 1;
  631. printf("%s", bpf_vlog);
  632. break;
  633. }
  634. if (!strstr(line_ptr, m.match)) {
  635. printf("Failed to find match %u: %s\n",
  636. m.line, m.match);
  637. ret = 1;
  638. printf("%s", bpf_vlog);
  639. break;
  640. }
  641. }
  642. if (fd_prog >= 0)
  643. close(fd_prog);
  644. }
  645. return ret;
  646. }
  647. static int do_test(unsigned int from, unsigned int to)
  648. {
  649. int all_pass = 0;
  650. int all_fail = 0;
  651. unsigned int i;
  652. for (i = from; i < to; i++) {
  653. struct bpf_align_test *test = &tests[i];
  654. int fail;
  655. printf("Test %3d: %s ... ",
  656. i, test->descr);
  657. fail = do_test_single(test);
  658. if (fail) {
  659. all_fail++;
  660. printf("FAIL\n");
  661. } else {
  662. all_pass++;
  663. printf("PASS\n");
  664. }
  665. }
  666. printf("Results: %d pass %d fail\n",
  667. all_pass, all_fail);
  668. return all_fail ? EXIT_FAILURE : EXIT_SUCCESS;
  669. }
  670. int main(int argc, char **argv)
  671. {
  672. unsigned int from = 0, to = ARRAY_SIZE(tests);
  673. if (argc == 3) {
  674. unsigned int l = atoi(argv[argc - 2]);
  675. unsigned int u = atoi(argv[argc - 1]);
  676. if (l < to && u < to) {
  677. from = l;
  678. to = u + 1;
  679. }
  680. } else if (argc == 2) {
  681. unsigned int t = atoi(argv[argc - 1]);
  682. if (t < to) {
  683. from = t;
  684. to = t + 1;
  685. }
  686. }
  687. return do_test(from, to);
  688. }