insn.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * x86 instruction analysis
  4. *
  5. * Copyright (C) IBM Corporation, 2002, 2004, 2009
  6. */
  7. #include <linux/kernel.h>
  8. #ifdef __KERNEL__
  9. #include <linux/string.h>
  10. #else
  11. #include <string.h>
  12. #endif
  13. #include <asm/inat.h> /*__ignore_sync_check__ */
  14. #include <asm/insn.h> /* __ignore_sync_check__ */
  15. #include <linux/unaligned.h> /* __ignore_sync_check__ */
  16. #include <linux/errno.h>
  17. #include <linux/kconfig.h>
  18. #include <asm/emulate_prefix.h> /* __ignore_sync_check__ */
/*
 * Convert a little-endian value of width sizeof(t) (as read from the
 * instruction buffer) to CPU byte order. Widths other than 1/2/4 bytes
 * are rejected at compile time via BUILD_BUG().
 */
#define leXX_to_cpu(t, r)						\
({									\
	__typeof__(t) v;						\
	switch (sizeof(t)) {						\
	case 4: v = le32_to_cpu(r); break;				\
	case 2: v = le16_to_cpu(r); break;				\
	case 1:	v = r; break;						\
	default:							\
		BUILD_BUG(); break;					\
	}								\
	v;								\
})

/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n)	\
	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)

/* Unchecked read of a t at next_byte; advances next_byte past it. */
#define __get_next(t, insn)	\
	({ t r = get_unaligned((t *)(insn)->next_byte); (insn)->next_byte += sizeof(t); leXX_to_cpu(t, r); })

/* Unchecked peek of a t, n elements of type t past next_byte; does not advance. */
#define __peek_nbyte_next(t, insn, n)	\
	({ t r = get_unaligned((t *)(insn)->next_byte + n); leXX_to_cpu(t, r); })

/*
 * Bounds-checked read: jumps to the *caller's* err_out label when the
 * read would run past end_kaddr. Every user of get_next()/peek_*()
 * must therefore provide an err_out label.
 */
#define get_next(t, insn)	\
	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })

/* Bounds-checked peek at element offset n; jumps to err_out on overrun. */
#define peek_nbyte_next(t, insn, n)	\
	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })

/* Bounds-checked peek of the very next byte. */
#define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)
  43. /**
  44. * insn_init() - initialize struct insn
  45. * @insn: &struct insn to be initialized
  46. * @kaddr: address (in kernel memory) of instruction (or copy thereof)
  47. * @buf_len: length of the insn buffer at @kaddr
  48. * @x86_64: !0 for 64-bit kernel or 64-bit app
  49. */
  50. void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
  51. {
  52. /*
  53. * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
  54. * even if the input buffer is long enough to hold them.
  55. */
  56. if (buf_len > MAX_INSN_SIZE)
  57. buf_len = MAX_INSN_SIZE;
  58. memset(insn, 0, sizeof(*insn));
  59. insn->kaddr = kaddr;
  60. insn->end_kaddr = kaddr + buf_len;
  61. insn->next_byte = kaddr;
  62. insn->x86_64 = x86_64;
  63. insn->opnd_bytes = 4;
  64. if (x86_64)
  65. insn->addr_bytes = 8;
  66. else
  67. insn->addr_bytes = 4;
  68. }
/* Magic byte sequences hypervisors prepend to tag instructions for emulation */
static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };
  71. static int __insn_get_emulate_prefix(struct insn *insn,
  72. const insn_byte_t *prefix, size_t len)
  73. {
  74. size_t i;
  75. for (i = 0; i < len; i++) {
  76. if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
  77. goto err_out;
  78. }
  79. insn->emulate_prefix_size = len;
  80. insn->next_byte += len;
  81. return 1;
  82. err_out:
  83. return 0;
  84. }
  85. static void insn_get_emulate_prefix(struct insn *insn)
  86. {
  87. if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
  88. return;
  89. __insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
  90. }
/**
 * insn_get_prefixes - scan x86 instruction prefix bytes
 * @insn: &struct insn containing instruction
 *
 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
 * to point to the (first) opcode. Also decodes a trailing REX/REX2
 * prefix and any VEX/EVEX escape bytes. No effect if @insn->prefixes.got
 * is already set.
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_prefixes(struct insn *insn)
{
	struct insn_field *prefixes = &insn->prefixes;
	insn_attr_t attr;
	insn_byte_t b, lb;	/* current byte / last legacy prefix seen */
	int i, nb;		/* nb = number of distinct legacy prefixes */

	if (prefixes->got)
		return 0;

	/* A hypervisor emulate-prefix, if any, precedes everything else */
	insn_get_emulate_prefix(insn);

	nb = 0;
	lb = 0;
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	while (inat_is_legacy_prefix(attr)) {
		/* Skip if same prefix */
		for (i = 0; i < nb; i++)
			if (prefixes->bytes[i] == b)
				goto found;
		if (nb == 4)
			/* Invalid instruction: more than 4 distinct prefixes */
			break;
		prefixes->bytes[nb++] = b;
		if (inat_is_address_size_prefix(attr)) {
			/* address size switches 2/4 or 4/8 (XOR toggles) */
			if (insn->x86_64)
				insn->addr_bytes ^= 12;
			else
				insn->addr_bytes ^= 6;
		} else if (inat_is_operand_size_prefix(attr)) {
			/* operand size switches 2/4 */
			insn->opnd_bytes ^= 6;
		}
found:
		prefixes->nbytes++;
		insn->next_byte++;
		lb = b;
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
	}
	/* Set the last prefix: bytes[3] is reserved for the last legacy prefix */
	if (lb && lb != insn->prefixes.bytes[3]) {
		if (unlikely(insn->prefixes.bytes[3])) {
			/* Swap the last prefix into lb's old slot */
			b = insn->prefixes.bytes[3];
			for (i = 0; i < nb; i++)
				if (prefixes->bytes[i] == lb)
					insn_set_byte(prefixes, i, b);
		}
		insn_set_byte(&insn->prefixes, 3, lb);
	}

	/* Decode REX prefix (64-bit mode only) */
	if (insn->x86_64) {
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
		if (inat_is_rex_prefix(attr)) {
			insn_field_set(&insn->rex_prefix, b, 1);
			insn->next_byte++;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_rex2_prefix(attr)) {
			/* REX2 is two bytes; the second byte carries the payload */
			insn_set_byte(&insn->rex_prefix, 0, b);
			b = peek_nbyte_next(insn_byte_t, insn, 1);
			insn_set_byte(&insn->rex_prefix, 1, b);
			insn->rex_prefix.nbytes = 2;
			insn->next_byte += 2;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
			insn->rex_prefix.got = 1;
			/* A REX2-prefixed insn cannot also carry VEX/EVEX */
			goto vex_end;
		}
	}
	insn->rex_prefix.got = 1;

	/* Decode VEX prefix */
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	if (inat_is_vex_prefix(attr)) {
		insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
		if (!insn->x86_64) {
			/*
			 * In 32-bits mode, if the [7:6] bits (mod bits of
			 * ModRM) on the second byte are not 11b, it is
			 * LDS or LES or BOUND.
			 */
			if (X86_MODRM_MOD(b2) != 3)
				goto vex_end;
		}
		insn_set_byte(&insn->vex_prefix, 0, b);
		insn_set_byte(&insn->vex_prefix, 1, b2);
		if (inat_is_evex_prefix(attr)) {
			/* EVEX: 4-byte prefix */
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			b2 = peek_nbyte_next(insn_byte_t, insn, 3);
			insn_set_byte(&insn->vex_prefix, 3, b2);
			insn->vex_prefix.nbytes = 4;
			insn->next_byte += 4;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_vex3_prefix(attr)) {
			/* VEX3: 3-byte prefix */
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			insn->vex_prefix.nbytes = 3;
			insn->next_byte += 3;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else {
			/*
			 * For VEX2, fake VEX3-like byte#2.
			 * Makes it easier to decode vex.W, vex.vvvv,
			 * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
			 */
			insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
			insn->vex_prefix.nbytes = 2;
			insn->next_byte += 2;
		}
	}
vex_end:
	insn->vex_prefix.got = 1;

	prefixes->got = 1;

	return 0;

err_out:
	return -ENODATA;
}
/**
 * insn_get_opcode - collect opcode(s)
 * @insn: &struct insn containing instruction
 *
 * Populates @insn->opcode, updates @insn->next_byte to point past the
 * opcode byte(s), and sets @insn->attr (except for groups).
 * If necessary, first collects any preceding (prefix) bytes.
 * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
 * is already 1.
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_opcode(struct insn *insn)
{
	struct insn_field *opcode = &insn->opcode;
	int pfx_id, ret;
	insn_byte_t op;

	if (opcode->got)
		return 0;

	/* Prefixes must be decoded before the opcode can be located. */
	ret = insn_get_prefixes(insn);
	if (ret)
		return ret;

	/* Get first opcode */
	op = get_next(insn_byte_t, insn);
	insn_set_byte(opcode, 0, op);
	opcode->nbytes = 1;

	/* Check if there is VEX prefix or not */
	if (insn_is_avx(insn)) {
		insn_byte_t m, p;
		m = insn_vex_m_bits(insn);
		p = insn_vex_p_bits(insn);
		insn->attr = inat_get_avx_attribute(op, m, p);
		/* SCALABLE EVEX uses p bits to encode operand size */
		if (inat_evex_scalable(insn->attr) && !insn_vex_w_bit(insn) &&
		    p == INAT_PFX_OPNDSZ)
			insn->opnd_bytes = 2;
		/* Reject EVEX-only insns without EVEX, and non-VEX non-group attrs */
		if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
		    (!inat_accept_vex(insn->attr) &&
		     !inat_is_group(insn->attr))) {
			/* This instruction is bad */
			insn->attr = 0;
			return -EINVAL;
		}
		/* VEX has only 1 byte for opcode */
		goto end;
	}

	/* Check if there is REX2 prefix or not */
	if (insn_is_rex2(insn)) {
		if (insn_rex2_m_bit(insn)) {
			/* map 1 is escape 0x0f */
			insn_attr_t esc_attr = inat_get_opcode_attribute(0x0f);
			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_escape_attribute(op, pfx_id, esc_attr);
		} else {
			insn->attr = inat_get_opcode_attribute(op);
		}
		goto end;
	}

	insn->attr = inat_get_opcode_attribute(op);
	/* Follow escape bytes (0x0f, 0x0f38, ...) into the multi-byte maps */
	while (inat_is_escape(insn->attr)) {
		/* Get escaped opcode */
		op = get_next(insn_byte_t, insn);
		opcode->bytes[opcode->nbytes++] = op;
		pfx_id = insn_last_prefix_id(insn);
		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
	}

	/* A VEX-only opcode reached without a VEX prefix is invalid */
	if (inat_must_vex(insn->attr)) {
		/* This instruction is bad */
		insn->attr = 0;
		return -EINVAL;
	}
end:
	opcode->got = 1;
	return 0;

err_out:
	return -ENODATA;
}
  308. /**
  309. * insn_get_modrm - collect ModRM byte, if any
  310. * @insn: &struct insn containing instruction
  311. *
  312. * Populates @insn->modrm and updates @insn->next_byte to point past the
  313. * ModRM byte, if any. If necessary, first collects the preceding bytes
  314. * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
  315. *
  316. * Returns:
  317. * 0: on success
  318. * < 0: on error
  319. */
  320. int insn_get_modrm(struct insn *insn)
  321. {
  322. struct insn_field *modrm = &insn->modrm;
  323. insn_byte_t pfx_id, mod;
  324. int ret;
  325. if (modrm->got)
  326. return 0;
  327. ret = insn_get_opcode(insn);
  328. if (ret)
  329. return ret;
  330. if (inat_has_modrm(insn->attr)) {
  331. mod = get_next(insn_byte_t, insn);
  332. insn_field_set(modrm, mod, 1);
  333. if (inat_is_group(insn->attr)) {
  334. pfx_id = insn_last_prefix_id(insn);
  335. insn->attr = inat_get_group_attribute(mod, pfx_id,
  336. insn->attr);
  337. if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
  338. /* Bad insn */
  339. insn->attr = 0;
  340. return -EINVAL;
  341. }
  342. }
  343. }
  344. if (insn->x86_64 && inat_is_force64(insn->attr))
  345. insn->opnd_bytes = 8;
  346. modrm->got = 1;
  347. return 0;
  348. err_out:
  349. return -ENODATA;
  350. }
  351. /**
  352. * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
  353. * @insn: &struct insn containing instruction
  354. *
  355. * If necessary, first collects the instruction up to and including the
  356. * ModRM byte. No effect if @insn->x86_64 is 0.
  357. */
  358. int insn_rip_relative(struct insn *insn)
  359. {
  360. struct insn_field *modrm = &insn->modrm;
  361. int ret;
  362. if (!insn->x86_64)
  363. return 0;
  364. ret = insn_get_modrm(insn);
  365. if (ret)
  366. return 0;
  367. /*
  368. * For rip-relative instructions, the mod field (top 2 bits)
  369. * is zero and the r/m field (bottom 3 bits) is 0x5.
  370. */
  371. return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
  372. }
  373. /**
  374. * insn_get_sib() - Get the SIB byte of instruction
  375. * @insn: &struct insn containing instruction
  376. *
  377. * If necessary, first collects the instruction up to and including the
  378. * ModRM byte.
  379. *
  380. * Returns:
  381. * 0: if decoding succeeded
  382. * < 0: otherwise.
  383. */
  384. int insn_get_sib(struct insn *insn)
  385. {
  386. insn_byte_t modrm;
  387. int ret;
  388. if (insn->sib.got)
  389. return 0;
  390. ret = insn_get_modrm(insn);
  391. if (ret)
  392. return ret;
  393. if (insn->modrm.nbytes) {
  394. modrm = insn->modrm.bytes[0];
  395. if (insn->addr_bytes != 2 &&
  396. X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
  397. insn_field_set(&insn->sib,
  398. get_next(insn_byte_t, insn), 1);
  399. }
  400. }
  401. insn->sib.got = 1;
  402. return 0;
  403. err_out:
  404. return -ENODATA;
  405. }
/**
 * insn_get_displacement() - Get the displacement of instruction
 * @insn: &struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * SIB byte.
 * Displacement value is sign-expanded.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_displacement(struct insn *insn)
{
	insn_byte_t mod, rm, base;
	int ret;

	if (insn->displacement.got)
		return 0;

	ret = insn_get_sib(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		/*
		 * Interpreting the modrm byte:
		 * mod = 00 - no displacement fields (exceptions below)
		 * mod = 01 - 1-byte displacement field
		 * mod = 10 - displacement field is 4 bytes, or 2 bytes if
		 *	address size = 2 (0x67 prefix in 32-bit mode)
		 * mod = 11 - no memory operand
		 *
		 * If address size = 2...
		 * mod = 00, r/m = 110 - displacement field is 2 bytes
		 *
		 * If address size != 2...
		 * mod != 11, r/m = 100 - SIB byte exists
		 * mod = 00, SIB base = 101 - displacement field is 4 bytes
		 * mod = 00, r/m = 101 - rip-relative addressing, displacement
		 *	field is 4 bytes
		 */
		mod = X86_MODRM_MOD(insn->modrm.value);
		rm = X86_MODRM_RM(insn->modrm.value);
		base = X86_SIB_BASE(insn->sib.value);
		if (mod == 3)
			/* Register operand: no displacement */
			goto out;
		if (mod == 1) {
			/* disp8, sign-extended via the signed char read */
			insn_field_set(&insn->displacement,
				       get_next(signed char, insn), 1);
		} else if (insn->addr_bytes == 2) {
			/* 16-bit addressing: disp16 cases from the table above */
			if ((mod == 0 && rm == 6) || mod == 2) {
				insn_field_set(&insn->displacement,
					       get_next(short, insn), 2);
			}
		} else {
			/* 32/64-bit addressing: disp32 cases from the table above */
			if ((mod == 0 && rm == 5) || mod == 2 ||
			    (mod == 0 && base == 5)) {
				insn_field_set(&insn->displacement,
					       get_next(int, insn), 4);
			}
		}
	}
out:
	insn->displacement.got = 1;
	return 0;

err_out:
	return -ENODATA;
}
  472. /* Decode moffset16/32/64. Return 0 if failed */
  473. static int __get_moffset(struct insn *insn)
  474. {
  475. switch (insn->addr_bytes) {
  476. case 2:
  477. insn_field_set(&insn->moffset1, get_next(short, insn), 2);
  478. break;
  479. case 4:
  480. insn_field_set(&insn->moffset1, get_next(int, insn), 4);
  481. break;
  482. case 8:
  483. insn_field_set(&insn->moffset1, get_next(int, insn), 4);
  484. insn_field_set(&insn->moffset2, get_next(int, insn), 4);
  485. break;
  486. default: /* opnd_bytes must be modified manually */
  487. goto err_out;
  488. }
  489. insn->moffset1.got = insn->moffset2.got = 1;
  490. return 1;
  491. err_out:
  492. return 0;
  493. }
  494. /* Decode imm v32(Iz). Return 0 if failed */
  495. static int __get_immv32(struct insn *insn)
  496. {
  497. switch (insn->opnd_bytes) {
  498. case 2:
  499. insn_field_set(&insn->immediate, get_next(short, insn), 2);
  500. break;
  501. case 4:
  502. case 8:
  503. insn_field_set(&insn->immediate, get_next(int, insn), 4);
  504. break;
  505. default: /* opnd_bytes must be modified manually */
  506. goto err_out;
  507. }
  508. return 1;
  509. err_out:
  510. return 0;
  511. }
  512. /* Decode imm v64(Iv/Ov), Return 0 if failed */
  513. static int __get_immv(struct insn *insn)
  514. {
  515. switch (insn->opnd_bytes) {
  516. case 2:
  517. insn_field_set(&insn->immediate1, get_next(short, insn), 2);
  518. break;
  519. case 4:
  520. insn_field_set(&insn->immediate1, get_next(int, insn), 4);
  521. insn->immediate1.nbytes = 4;
  522. break;
  523. case 8:
  524. insn_field_set(&insn->immediate1, get_next(int, insn), 4);
  525. insn_field_set(&insn->immediate2, get_next(int, insn), 4);
  526. break;
  527. default: /* opnd_bytes must be modified manually */
  528. goto err_out;
  529. }
  530. insn->immediate1.got = insn->immediate2.got = 1;
  531. return 1;
  532. err_out:
  533. return 0;
  534. }
  535. /* Decode ptr16:16/32(Ap) */
  536. static int __get_immptr(struct insn *insn)
  537. {
  538. switch (insn->opnd_bytes) {
  539. case 2:
  540. insn_field_set(&insn->immediate1, get_next(short, insn), 2);
  541. break;
  542. case 4:
  543. insn_field_set(&insn->immediate1, get_next(int, insn), 4);
  544. break;
  545. case 8:
  546. /* ptr16:64 is not exist (no segment) */
  547. return 0;
  548. default: /* opnd_bytes must be modified manually */
  549. goto err_out;
  550. }
  551. insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
  552. insn->immediate1.got = insn->immediate2.got = 1;
  553. return 1;
  554. err_out:
  555. return 0;
  556. }
/**
 * insn_get_immediate() - Get the immediate in an instruction
 * @insn: &struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * displacement bytes.
 * Basically, most of immediates are sign-expanded. Unsigned-value can be
 * computed by bit masking with ((1 << (nbytes * 8)) - 1)
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_immediate(struct insn *insn)
{
	int ret;

	if (insn->immediate.got)
		return 0;

	ret = insn_get_displacement(insn);
	if (ret)
		return ret;

	/* moffset-form operands replace the usual immediate */
	if (inat_has_moffset(insn->attr)) {
		if (!__get_moffset(insn))
			goto err_out;
		goto done;
	}

	if (!inat_has_immediate(insn->attr))
		/* no immediates */
		goto done;

	/* Dispatch on the immediate size class from the attribute table */
	switch (inat_immediate_size(insn->attr)) {
	case INAT_IMM_BYTE:
		/* signed char read sign-extends the disp8-style immediate */
		insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
		break;
	case INAT_IMM_WORD:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case INAT_IMM_DWORD:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	case INAT_IMM_QWORD:
		/* 64-bit immediate stored as two 32-bit halves */
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	case INAT_IMM_PTR:
		if (!__get_immptr(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD32:
		if (!__get_immv32(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD:
		if (!__get_immv(insn))
			goto err_out;
		break;
	default:
		/* Here, insn must have an immediate, but failed */
		goto err_out;
	}
	/* A few opcodes carry a second, one-byte immediate */
	if (inat_has_second_immediate(insn->attr)) {
		insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
	}
done:
	insn->immediate.got = 1;
	return 0;

err_out:
	return -ENODATA;
}
  625. /**
  626. * insn_get_length() - Get the length of instruction
  627. * @insn: &struct insn containing instruction
  628. *
  629. * If necessary, first collects the instruction up to and including the
  630. * immediates bytes.
  631. *
  632. * Returns:
  633. * - 0 on success
  634. * - < 0 on error
  635. */
  636. int insn_get_length(struct insn *insn)
  637. {
  638. int ret;
  639. if (insn->length)
  640. return 0;
  641. ret = insn_get_immediate(insn);
  642. if (ret)
  643. return ret;
  644. insn->length = (unsigned char)((unsigned long)insn->next_byte
  645. - (unsigned long)insn->kaddr);
  646. return 0;
  647. }
  648. /* Ensure this instruction is decoded completely */
  649. static inline int insn_complete(struct insn *insn)
  650. {
  651. return insn->opcode.got && insn->modrm.got && insn->sib.got &&
  652. insn->displacement.got && insn->immediate.got;
  653. }
  654. /**
  655. * insn_decode() - Decode an x86 instruction
  656. * @insn: &struct insn to be initialized
  657. * @kaddr: address (in kernel memory) of instruction (or copy thereof)
  658. * @buf_len: length of the insn buffer at @kaddr
  659. * @m: insn mode, see enum insn_mode
  660. *
  661. * Returns:
  662. * 0: if decoding succeeded
  663. * < 0: otherwise.
  664. */
  665. int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
  666. {
  667. int ret;
  668. /* #define INSN_MODE_KERN -1 __ignore_sync_check__ mode is only valid in the kernel */
  669. if (m == INSN_MODE_KERN)
  670. insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
  671. else
  672. insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
  673. ret = insn_get_length(insn);
  674. if (ret)
  675. return ret;
  676. if (insn_complete(insn))
  677. return 0;
  678. return -EINVAL;
  679. }