insn.c

/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)
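
/*
 * Encoding-class lookup table, indexed by instruction bits [28:25]
 * (see aarch64_get_insn_class() below).
 */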
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);
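
/*
 * patch_map() returns a writable alias of @addr for text patching: module
 * text (with CONFIG_STRICT_MODULE_RWX) is looked up via vmalloc_to_page(),
 * core kernel text via its physical address, and the page is mapped through
 * the given FIX_TEXT_POKE fixmap slot. Anything else is returned unchanged.
 */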
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */
	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr */
	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		__flush_icache_range((uintptr_t)tp,
				     (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};
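
/*
 * stop_machine() callback: the first CPU to arrive becomes the master and
 * patches every instruction in the batch with aarch64_insn_patch_text_nosync().
 * It then bumps cpu_count a second time so the count exceeds
 * num_online_cpus(), releasing the other CPUs, which spin until then and
 * finish with an isb() to discard any stale prefetched instructions.
 */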
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
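
/*
 * Typical (illustrative) use, e.g. replacing a single instruction with a
 * NOP; the variable names here are hypothetical:
 *
 *	void *addrs[] = { (void *)ip };
 *	u32 insns[] = { aarch64_insn_gen_nop() };
 *
 *	aarch64_insn_patch_text(addrs, insns, 1);
 */
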
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
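
/*
 * ADR/ADRP split their 21-bit immediate into immlo (bits [30:29]) and
 * immhi (bits [23:5]); the helpers below reassemble and split it using the
 * masks and shifts defined above.
 */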
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
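
/*
 * For example, aarch64_insn_gen_branch_imm() below patches the imm26 field
 * of a B/BL instruction with:
 *
 *	aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, offset >> 2);
 */
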
u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
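
/*
 * branch_imm_common() validates alignment and range and returns the branch
 * offset in bytes. On failure it returns @range itself, so callers can
 * detect the error with an "offset >= range" check and turn it into
 * AARCH64_BREAK_FAULT.
 */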
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
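
/*
 * Illustrative use (register numbers chosen arbitrarily): the rough
 * equivalent of "ldr x0, [x1, x2]" can be generated with
 *
 *	aarch64_insn_gen_load_store_reg(AARCH64_INSN_REG_0, AARCH64_INSN_REG_1,
 *					AARCH64_INSN_REG_2, AARCH64_INSN_SIZE_64,
 *					AARCH64_INSN_LDST_LOAD_REG_OFFSET);
 */
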
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	u32 insn = aarch64_insn_get_ldadd_value();

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}
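
/*
 * Illustrative use (base register chosen arbitrarily): roughly
 * "prfm pldl1keep, [x0]" can be generated with
 *
 *	aarch64_insn_gen_prefetch(AARCH64_INSN_REG_0,
 *				  AARCH64_INSN_PRFM_TYPE_PLD,
 *				  AARCH64_INSN_PRFM_TARGET_L1,
 *				  AARCH64_INSN_PRFM_POLICY_KEEP);
 */
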
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
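
/*
 * Move-wide instructions take a 16-bit immediate plus a "hw" field selecting
 * the 16-bit lane; the shift must therefore be a multiple of 16, and is
 * encoded below as shift / 16 starting at bit 21.
 */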
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
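/*
 * The "(imm << n) >> m" expressions below sign-extend the decoded field and
 * scale it by 4 (the instruction size) in a single arithmetic shift.
 */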
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}
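
/*
 * Worked example (for illustration only): imm = 0x00ff00ff00ff00ff with the
 * 64-bit variant. The repeating element is 0x00ff with an element size of
 * 16, so N = 0; the element contains eight contiguous ones starting at bit
 * 0, so no rotation is needed: immr = 0 and imms = 0b100111 (element size
 * 16, ones - 1 = 7).
 */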
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}